From 158ba1a1d95159fb3562559dbda394a8bf81af0e Mon Sep 17 00:00:00 2001 From: Weizhen Wang Date: Mon, 1 Aug 2022 14:02:06 +0800 Subject: [PATCH] *: enable part revive for all code (#36703) --- DEPS.bzl | 163 ++++++------ Makefile | 2 +- bindinfo/handle.go | 1 - br/pkg/backup/client.go | 8 +- br/pkg/backup/schema.go | 1 - br/pkg/glue/console_glue_test.go | 1 - br/pkg/gluetidb/glue.go | 2 - br/pkg/lightning/backend/kv/kv2sql.go | 2 +- br/pkg/lightning/backend/kv/sql2kv.go | 8 +- br/pkg/lightning/backend/kv/sql2kv_test.go | 1 - br/pkg/lightning/backend/local/engine.go | 2 +- br/pkg/lightning/backend/local/local_test.go | 1 - br/pkg/lightning/backend/local/localhelper.go | 39 ++- .../backend/local/localhelper_test.go | 1 - br/pkg/lightning/backend/tidb/tidb.go | 1 - .../lightning/checkpoints/checkpoints_test.go | 1 - br/pkg/lightning/common/errors.go | 3 +- br/pkg/lightning/mydump/parquet_parser.go | 3 +- br/pkg/lightning/mydump/region.go | 4 +- br/pkg/lightning/restore/checksum_test.go | 3 +- .../lightning/restore/chunk_restore_test.go | 2 +- br/pkg/lightning/restore/get_pre_info.go | 14 +- br/pkg/lightning/restore/meta_manager.go | 1 - br/pkg/lightning/restore/meta_manager_test.go | 6 +- br/pkg/lightning/restore/precheck_impl.go | 4 +- .../lightning/restore/precheck_impl_test.go | 1 - br/pkg/lightning/restore/table_restore.go | 7 +- br/pkg/restore/client.go | 12 +- br/pkg/restore/client_test.go | 2 +- br/pkg/restore/db.go | 1 - br/pkg/restore/db_test.go | 2 - br/pkg/restore/split.go | 3 +- br/pkg/restore/split/client.go | 4 +- br/pkg/restore/split_test.go | 2 - br/pkg/restore/stream_metas.go | 5 +- br/pkg/storage/azblob_test.go | 1 - br/pkg/storage/gcs_test.go | 4 +- br/pkg/stream/rewrite_meta_rawkv_test.go | 1 - br/pkg/stream/stream_mgr.go | 6 +- br/pkg/stream/stream_status.go | 1 - br/pkg/streamhelper/advancer.go | 3 +- br/pkg/streamhelper/advancer_cliext.go | 2 +- br/pkg/streamhelper/basic_lib_for_test.go | 34 +-- br/pkg/streamhelper/regioniter_test.go | 1 - br/pkg/streamhelper/tsheap_test.go | 1 - br/pkg/task/restore.go | 5 +- br/pkg/task/stream.go | 3 +- br/pkg/utils/backoff_test.go | 7 +- br/pkg/version/version_test.go | 2 - build/BUILD.bazel | 1 + build/linter/allrevive/BUILD.bazel | 17 ++ build/linter/allrevive/analyzer.go | 198 +++++++++++++++ build/linter/gci/BUILD.bazel | 2 +- build/linter/gci/analysis.go | 14 +- build/linter/revive/analyzer.go | 3 + build/nogo_config.json | 13 +- cmd/ddltest/ddl_test.go | 2 - cmd/ddltest/index_test.go | 1 + cmd/explaintest/main.go | 5 + config/config_test.go | 1 - ddl/backfilling.go | 4 +- ddl/db_partition_test.go | 236 +++++++++--------- ddl/db_rename_test.go | 20 +- ddl/db_table_test.go | 1 - ddl/db_test.go | 4 - ddl/ddl_api.go | 29 +-- ddl/ddl_error_test.go | 1 - ddl/ddl_tiflash_api.go | 14 +- ddl/ddl_tiflash_test.go | 2 +- ddl/index.go | 12 +- ddl/index_modify_test.go | 2 +- ddl/job_table.go | 6 +- ddl/modify_column_test.go | 1 - ddl/placement_policy_test.go | 1 + ddl/placement_sql_test.go | 1 - ddl/schematracker/dm_tracker.go | 47 ++-- ddl/sequence_test.go | 1 + ddl/serial_test.go | 1 + ddl/tiflash_replica_test.go | 2 - domain/schema_checker.go | 1 - dumpling/export/config_test.go | 1 - dumpling/export/prepare_test.go | 2 +- dumpling/export/sql_test.go | 1 - executor/admin_test.go | 1 - executor/aggfuncs/func_lead_lag_test.go | 2 - executor/aggregate.go | 4 +- executor/aggregate_test.go | 7 +- executor/analyze_test.go | 1 + executor/analyzetest/analyze_test.go | 114 ++++----- executor/benchmark_test.go | 2 - executor/builder.go | 4 +- 
executor/compact_table.go | 3 +- executor/ddl_test.go | 1 - executor/executor_test.go | 13 +- executor/executor_txn_test.go | 1 - executor/explainfor_test.go | 1 - executor/grant_test.go | 1 - executor/index_lookup_merge_join.go | 9 +- executor/infoschema_reader.go | 4 +- executor/insert.go | 4 +- executor/insert_common.go | 2 +- executor/insert_test.go | 1 - executor/join_test.go | 5 +- executor/joiner.go | 7 +- executor/mem_reader.go | 2 +- executor/memtable_reader.go | 1 - executor/partition_table_test.go | 13 +- executor/plan_replayer.go | 2 +- executor/point_get_test.go | 4 +- executor/prepared_test.go | 5 +- executor/projection.go | 1 - executor/set_test.go | 1 - executor/show.go | 1 - executor/showtest/show_test.go | 3 - executor/shuffle.go | 1 - executor/simple.go | 12 +- executor/simpletest/simple_test.go | 2 - executor/splittest/split_table_test.go | 1 - executor/tiflashtest/tiflash_test.go | 2 - executor/write.go | 1 - expression/aggregation/base_func.go | 1 - expression/bench_test.go | 5 +- expression/builtin_arithmetic.go | 1 - expression/builtin_arithmetic_vec.go | 4 +- expression/builtin_cast.go | 5 +- expression/builtin_control.go | 1 - expression/builtin_encryption_test.go | 1 - expression/builtin_encryption_vec.go | 2 + expression/builtin_json_vec.go | 1 + expression/builtin_string.go | 1 - expression/builtin_string_test.go | 1 - expression/builtin_string_vec.go | 1 + expression/builtin_time.go | 2 - expression/builtin_vectorized_test.go | 1 - expression/constant_propagation.go | 1 - expression/constant_propagation_test.go | 2 +- expression/expr_to_pb_test.go | 1 - expression/expression.go | 2 +- expression/flag_simplify_test.go | 2 +- expression/integration_test.go | 5 - expression/partition_pruner.go | 2 +- expression/util.go | 2 +- go.mod | 28 +-- go.sum | 54 ++-- infoschema/cache_test.go | 1 - infoschema/cluster_tables_test.go | 2 - infoschema/tables_test.go | 1 - parser/ast/ddl.go | 1 - parser/ast/dml.go | 15 +- parser/ast/expressions.go | 1 - parser/ast/expressions_test.go | 1 - parser/ast/misc.go | 6 +- parser/ast/misc_test.go | 1 - parser/ast/util.go | 3 +- parser/charset/charset_test.go | 2 +- parser/goyacc/format_yacc.go | 13 +- parser/parser_test.go | 4 +- parser/types/field_type_test.go | 1 - planner/cascades/integration_test.go | 26 +- planner/cascades/stringer_test.go | 2 +- planner/cascades/transformation_rules_test.go | 34 +-- planner/core/binary_plan_test.go | 2 +- planner/core/cacheable_checker_test.go | 1 - planner/core/cbo_test.go | 34 +-- planner/core/enforce_mpp_test.go | 14 +- planner/core/exhaust_physical_plans.go | 1 - planner/core/expression_rewriter.go | 2 +- planner/core/expression_rewriter_test.go | 4 +- planner/core/flat_plan_test.go | 2 +- planner/core/indexmerge_test.go | 2 +- planner/core/integration_partition_test.go | 8 +- planner/core/integration_test.go | 167 ++++++------- planner/core/logical_plan_builder.go | 1 + planner/core/logical_plan_test.go | 38 +-- planner/core/optimizer_test.go | 1 - planner/core/partition_pruner_test.go | 8 +- planner/core/physical_plan_test.go | 84 +++---- planner/core/plan_test.go | 6 +- planner/core/planbuilder.go | 1 - planner/core/point_get_plan.go | 1 - planner/core/point_get_plan_test.go | 4 +- planner/core/prepare_test.go | 3 - planner/core/rule_aggregation_push_down.go | 2 +- planner/core/rule_join_reorder_test.go | 2 +- planner/core/rule_partition_processor.go | 2 - planner/core/rule_predicate_push_down.go | 2 - planner/core/rule_result_reorder_test.go | 2 +- planner/core/stats_test.go | 4 +- 
planner/core/task.go | 1 - planner/core/window_push_down_test.go | 6 +- planner/funcdep/fd_graph_test.go | 1 - planner/implementation/base_test.go | 1 - planner/optimize.go | 55 ++-- privilege/privileges/cache.go | 3 +- privilege/privileges/privileges_test.go | 7 +- server/conn.go | 2 - server/conn_stmt.go | 1 - server/conn_test.go | 1 - server/http_handler.go | 3 +- server/server_test.go | 6 - server/tidb_test.go | 4 - session/clustered_index_test.go | 2 +- session/nontransactional.go | 1 - session/schema_amender.go | 2 +- session/session_test/session_test.go | 2 +- .../variable/mock_globalaccessor_test.go | 1 - sessionctx/variable/sysvar.go | 5 +- sessionctx/variable/sysvar_test.go | 2 - sessionctx/variable/variable.go | 3 +- sessionctx/variable/varsutil_test.go | 2 - sessiontxn/failpoint.go | 2 +- sessiontxn/isolation/repeatable_read.go | 11 +- sessiontxn/txn_context_test.go | 1 - statistics/builder.go | 2 +- statistics/column.go | 2 +- statistics/feedback.go | 2 +- statistics/handle/handle.go | 8 +- statistics/handle/handle_hist.go | 4 +- statistics/handle/handle_test.go | 128 +++++----- statistics/index.go | 2 +- statistics/integration_test.go | 4 +- statistics/selectivity_test.go | 28 +-- statistics/trace_test.go | 2 +- store/copr/batch_coprocessor.go | 2 +- store/copr/batch_coprocessor_test.go | 1 - store/copr/coprocessor_cache.go | 2 +- store/driver/txn/error.go | 2 +- store/gcworker/gc_worker.go | 2 +- store/gcworker/gc_worker_test.go | 2 +- store/helper/helper_test.go | 1 - .../unistore/cophandler/cop_handler_test.go | 1 - .../mockstore/unistore/cophandler/mpp_exec.go | 2 +- store/mockstore/unistore/tikv/detector.go | 1 - table/tables/mutation_checker.go | 8 +- table/tables/partition_test.go | 1 - table/tables/tables.go | 2 +- testkit/testdata/testdata.go | 8 +- testkit/testkit.go | 1 - tidb-binlog/node/registry_test.go | 12 - types/const_test.go | 1 - types/convert_test.go | 2 +- types/json/binary_test.go | 1 - types/time_test.go | 2 - util/chunk/chunk_test.go | 1 - util/chunk/column_test.go | 1 - util/dbutil/common_test.go | 1 - util/deadlockhistory/deadlock_history_test.go | 1 - util/expensivequery/memory_usage_alarm.go | 52 ++-- util/fastrand/random_test.go | 2 +- util/generatedexpr/generated_expr.go | 2 +- util/localpool/localpool_test.go | 11 - util/memory/tracker_test.go | 1 - util/misc.go | 3 +- util/ranger/ranger_test.go | 19 +- util/schemacmp/lattice_test.go | 1 - util/schemacmp/type_test.go | 1 - util/sqlexec/utils_test.go | 1 - util/stmtsummary/statement_summary_test.go | 4 +- 258 files changed, 1186 insertions(+), 1184 deletions(-) create mode 100644 build/linter/allrevive/BUILD.bazel create mode 100644 build/linter/allrevive/analyzer.go diff --git a/DEPS.bzl b/DEPS.bzl index 55f0f13716eeb..542c1bbb8a3b8 100644 --- a/DEPS.bzl +++ b/DEPS.bzl @@ -26,8 +26,8 @@ def go_deps(): name = "cc_mvdan_unparam", build_file_proto_mode = "disable", importpath = "mvdan.cc/unparam", - sum = "h1:Jh3LAeMt1eGpxomyu3jVkmVZWW2MxZ1qIIV2TZ/nRio=", - version = "v0.0.0-20211214103731-d0ef000c54e5", + sum = "h1:seuXWbRB1qPrS3NQnHmFKLJLtskWyueeIzmLXghMGgk=", + version = "v0.0.0-20220706161116-678bad134442", ) go_repository( @@ -81,6 +81,13 @@ def go_deps(): sum = "h1:Hbq0/3fJPQhNkN0dR95AVrr6R7tou91y0uHG5pOcUuw=", version = "v1.0.0", ) + go_repository( + name = "com_github_alingse_asasalint", + build_file_proto_mode = "disable", + importpath = "github.com/alingse/asasalint", + sum = "h1:qqGPDTV0ff0tWHN/nnIlSdjlU/EwRPaUY4SfpE1rnms=", + version = "v0.0.10", + ) go_repository( name = 
"com_github_aliyun_alibaba_cloud_sdk_go", @@ -108,8 +115,8 @@ def go_deps(): name = "com_github_antonboom_errname", build_file_proto_mode = "disable", importpath = "github.com/Antonboom/errname", - sum = "h1:LzIJZlyLOCSu51o3/t2n9Ck7PcoP9wdbrdaW6J8fX24=", - version = "v0.1.6", + sum = "h1:mBBDKvEYwPl4WFFNwec1CZO096G6vzK9vvDQzAwkako=", + version = "v0.1.7", ) go_repository( name = "com_github_antonboom_nilnil", @@ -630,8 +637,8 @@ def go_deps(): name = "com_github_cpuguy83_go_md2man_v2", build_file_proto_mode = "disable_global", importpath = "github.com/cpuguy83/go-md2man/v2", - sum = "h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKYj/3DxU=", - version = "v2.0.1", + sum = "h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=", + version = "v2.0.2", ) go_repository( name = "com_github_creack_pty", @@ -665,8 +672,8 @@ def go_deps(): name = "com_github_daixiang0_gci", build_file_proto_mode = "disable", importpath = "github.com/daixiang0/gci", - sum = "h1:+EZ83znNs73C9ZBTM7xhNagMP6gJs5wlptiFiuce5BM=", - version = "v0.3.4", + sum = "h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ=", + version = "v0.4.3", ) go_repository( @@ -911,8 +918,8 @@ def go_deps(): name = "com_github_firefart_nonamedreturns", build_file_proto_mode = "disable", importpath = "github.com/firefart/nonamedreturns", - sum = "h1:fSvcq6ZpK/uBAgJEGMvzErlzyM4NELLqqdTofVjVNag=", - version = "v1.0.1", + sum = "h1:abzI1p7mAEPYuR4A+VLKn4eNDOycjYo2phmY9sfv40Y=", + version = "v1.0.4", ) go_repository( @@ -968,15 +975,15 @@ def go_deps(): name = "com_github_fzipp_gocyclo", build_file_proto_mode = "disable_global", importpath = "github.com/fzipp/gocyclo", - sum = "h1:L66amyuYogbxl0j2U+vGqJXusPF2IkduvXLnYD5TFgw=", - version = "v0.5.1", + sum = "h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=", + version = "v0.6.0", ) go_repository( name = "com_github_gaijinentertainment_go_exhaustruct_v2", build_file_proto_mode = "disable", importpath = "github.com/GaijinEntertainment/go-exhaustruct/v2", - sum = "h1:LAPPhJ4KR5Z8aKVZF5S48csJkxL5RMKmE/98fMs1u5M=", - version = "v2.1.0", + sum = "h1:V9xVvhKbLt7unNEGAruK1xXglyc668Pq3Xx0MNTNqpo=", + version = "v2.2.0", ) go_repository( @@ -1329,8 +1336,8 @@ def go_deps(): name = "com_github_golangci_golangci_lint", build_file_proto_mode = "disable", importpath = "github.com/golangci/golangci-lint", - sum = "h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo=", - version = "v1.46.2", + sum = "h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw=", + version = "v1.47.2", ) go_repository( name = "com_github_golangci_gosec", @@ -1403,8 +1410,8 @@ def go_deps(): name = "com_github_google_go_cmp", build_file_proto_mode = "disable_global", importpath = "github.com/google/go-cmp", - sum = "h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=", - version = "v0.5.7", + sum = "h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=", + version = "v0.5.8", ) go_repository( name = "com_github_google_go_querystring", @@ -1685,8 +1692,8 @@ def go_deps(): name = "com_github_hashicorp_go_version", build_file_proto_mode = "disable_global", importpath = "github.com/hashicorp/go-version", - sum = "h1:aAQzgqIrRKRa7w75CKpbBxYsmUoPjzVm1W59ca1L0J4=", - version = "v1.4.0", + sum = "h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=", + version = "v1.6.0", ) go_repository( name = "com_github_hashicorp_golang_lru", @@ -2143,15 +2150,15 @@ def go_deps(): name = "com_github_kulti_thelper", build_file_proto_mode = "disable", importpath = "github.com/kulti/thelper", - sum = "h1:K4xulKkwOCnT1CDms6Ex3uG1dvSMUUQe9zxgYQgbRXs=", - version = "v0.6.2", + sum = 
"h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=", + version = "v0.6.3", ) go_repository( name = "com_github_kunwardeep_paralleltest", build_file_proto_mode = "disable", importpath = "github.com/kunwardeep/paralleltest", - sum = "h1:UdKIkImEAXjR1chUWLn+PNXqWUGs//7tzMeWuP7NhmI=", - version = "v1.0.3", + sum = "h1:FCKYMF1OF2+RveWlABsdnmsvJrei5aoyZoaGS+Ugg8g=", + version = "v1.0.6", ) go_repository( @@ -2238,8 +2245,8 @@ def go_deps(): name = "com_github_maratori_testpackage", build_file_proto_mode = "disable", importpath = "github.com/maratori/testpackage", - sum = "h1:QtJ5ZjqapShm0w5DosRjg0PRlSdAdlx+W6cCKoALdbQ=", - version = "v1.0.1", + sum = "h1:GJY4wlzQhuBusMF1oahQCBtUV/AQ/k69IZ68vxaac2Q=", + version = "v1.1.0", ) go_repository( name = "com_github_masterminds_semver", @@ -2529,8 +2536,8 @@ def go_deps(): name = "com_github_nishanths_exhaustive", build_file_proto_mode = "disable", importpath = "github.com/nishanths/exhaustive", - sum = "h1:xV/WU3Vdwh5BUH4N06JNUznb6d5zhRPOnlgCrpNYNKA=", - version = "v0.7.11", + sum = "h1:0QKNascWv9qIHY7zRoZSxeRr6kuk5aAT3YXLTiDmjTo=", + version = "v0.8.1", ) go_repository( @@ -2661,8 +2668,8 @@ def go_deps(): name = "com_github_pelletier_go_toml_v2", build_file_proto_mode = "disable", importpath = "github.com/pelletier/go-toml/v2", - sum = "h1:P7Bq0SaI8nsexyay5UAyDo+ICWy5MQPgEZ5+l8JQTKo=", - version = "v2.0.0", + sum = "h1:+jQXlF3scKIcSEKkdHzXhCTDLPFi5r1wnK6yPS+49Gw=", + version = "v2.0.2", ) go_repository( name = "com_github_peterbourgon_g2s", @@ -2875,8 +2882,8 @@ def go_deps(): name = "com_github_quasilyte_go_ruleguard_dsl", build_file_proto_mode = "disable", importpath = "github.com/quasilyte/go-ruleguard/dsl", - sum = "h1:5+KTKb2YREUYiqZFEIuifFyBxlcCUPWgNZkWy71XS0Q=", - version = "v0.3.19", + sum = "h1:vNkC6fC6qMLzCOGbnIHOd5ixUGgTbp3Z4fGnUgULlDA=", + version = "v0.3.21", ) go_repository( name = "com_github_quasilyte_gogrep", @@ -3035,8 +3042,8 @@ def go_deps(): name = "com_github_securego_gosec_v2", build_file_proto_mode = "disable", importpath = "github.com/securego/gosec/v2", - sum = "h1:+PDkpzR41OI2jrw1q6AdXZCbsNGNGT7pQjal0H0cArI=", - version = "v2.11.0", + sum = "h1:CQWdW7ATFpvLSohMVsajscfyHJ5rsGmEXmsNcsDNmAg=", + version = "v2.12.0", ) go_repository( @@ -3058,8 +3065,8 @@ def go_deps(): name = "com_github_shirou_gopsutil_v3", build_file_proto_mode = "disable_global", importpath = "github.com/shirou/gopsutil/v3", - sum = "h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=", - version = "v3.22.4", + sum = "h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ=", + version = "v3.22.6", ) go_repository( name = "com_github_shopify_goreferrer", @@ -3132,12 +3139,20 @@ def go_deps(): sum = "h1:0hLQKpgC53OVF1VT7CeoFHk9YKstur1XOgfYIc1yrHI=", version = "v1.0.2", ) + go_repository( + name = "com_github_sivchari_nosnakecase", + build_file_proto_mode = "disable", + importpath = "github.com/sivchari/nosnakecase", + sum = "h1:ZBvAu1H3uteN0KQ0IsLpIFOwYgPEhKLyv2ahrVkub6M=", + version = "v1.5.0", + ) + go_repository( name = "com_github_sivchari_tenv", build_file_proto_mode = "disable", importpath = "github.com/sivchari/tenv", - sum = "h1:wxW0mFpKI6DIb3s6m1jCDYvkWXCskrimXMuGd0K/kSQ=", - version = "v1.5.0", + sum = "h1:d4laZMBK6jpe5PWepxlV9S+LC0yXqvYHiq8E6ceoVVE=", + version = "v1.7.0", ) go_repository( @@ -3194,15 +3209,15 @@ def go_deps(): name = "com_github_spf13_cast", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/cast", - sum = "h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA=", - version = "v1.4.1", + sum = 
"h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w=", + version = "v1.5.0", ) go_repository( name = "com_github_spf13_cobra", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/cobra", - sum = "h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=", - version = "v1.4.0", + sum = "h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=", + version = "v1.5.0", ) go_repository( name = "com_github_spf13_jwalterweatherman", @@ -3222,8 +3237,8 @@ def go_deps(): name = "com_github_spf13_viper", build_file_proto_mode = "disable_global", importpath = "github.com/spf13/viper", - sum = "h1:7OX/1FS6n7jHD1zGrZTM7WtY13ZELRyosK4k93oPr44=", - version = "v1.11.0", + sum = "h1:CZ7eSOd3kZoaYDLbXnmzgQI5RlciuXBMA+18HwHRfZQ=", + version = "v1.12.0", ) go_repository( name = "com_github_ssgreg_nlreturn_v2", @@ -3259,22 +3274,22 @@ def go_deps(): name = "com_github_stretchr_objx", build_file_proto_mode = "disable_global", importpath = "github.com/stretchr/objx", - sum = "h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=", - version = "v0.1.1", + sum = "h1:M2gUjqZET1qApGOWNSnZ49BAIMX4F/1plDv3+l31EJ4=", + version = "v0.4.0", ) go_repository( name = "com_github_stretchr_testify", build_file_proto_mode = "disable_global", importpath = "github.com/stretchr/testify", - sum = "h1:rh3VYpfvzXRbJ90ymx1yfhGl/wq8ac2m/cUbao61kwY=", - version = "v1.7.2-0.20220504104629-106ec21d14df", + sum = "h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=", + version = "v1.8.0", ) go_repository( name = "com_github_subosito_gotenv", build_file_proto_mode = "disable_global", importpath = "github.com/subosito/gotenv", - sum = "h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s=", - version = "v1.2.0", + sum = "h1:yAzM1+SmVcz5R4tXGsNMu1jUl2aOJXoiWUCEwwnGrvs=", + version = "v1.4.0", ) go_repository( name = "com_github_sylvia7788_contextcheck", @@ -3353,8 +3368,8 @@ def go_deps(): name = "com_github_tomarrell_wrapcheck_v2", build_file_proto_mode = "disable", importpath = "github.com/tomarrell/wrapcheck/v2", - sum = "h1:Cf4a/iwuMp9s7kKrh74GTgijRVim0wEpKjgAsT7Wctw=", - version = "v2.6.1", + sum = "h1:3dI6YNcrJTQ/CJQ6M/DUkc0gnqYSIk6o0rChn9E/D0M=", + version = "v2.6.2", ) go_repository( name = "com_github_tommy_muehle_go_mnd_v2", @@ -3425,8 +3440,8 @@ def go_deps(): name = "com_github_uudashr_gocognit", build_file_proto_mode = "disable", importpath = "github.com/uudashr/gocognit", - sum = "h1:rrSex7oHr3/pPLQ0xoWq108XMU8s678FJcQ+aSfOHa4=", - version = "v1.0.5", + sum = "h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=", + version = "v1.0.6", ) go_repository( @@ -3590,8 +3605,8 @@ def go_deps(): name = "com_github_yuin_goldmark", build_file_proto_mode = "disable_global", importpath = "github.com/yuin/goldmark", - sum = "h1:/vn0k+RBvwlxEmP5E7SZMqNxPhfMVFEJiykr15/0XKM=", - version = "v1.4.1", + sum = "h1:fVcFKWvrslecOb/tg+Cc05dkeYx540o0FuFt3nUVDoE=", + version = "v1.4.13", ) go_repository( name = "com_github_yusufpapurcu_wmi", @@ -3604,8 +3619,8 @@ def go_deps(): name = "com_gitlab_bosi_decorder", build_file_proto_mode = "disable", importpath = "gitlab.com/bosi/decorder", - sum = "h1:ehqZe8hI4w7O4b1vgsDZw1YU1PE7iJXrQWFMsocbQ1w=", - version = "v0.2.1", + sum = "h1:LRfb3lP6mZWjUzpMOCLTVjcnl/SqZWBWmKNqQvMocQs=", + version = "v0.2.2", ) go_repository( @@ -3755,8 +3770,8 @@ def go_deps(): name = "in_gopkg_ini_v1", build_file_proto_mode = "disable_global", importpath = "gopkg.in/ini.v1", - sum = "h1:SsAcf+mM7mRZo2nJNGt8mZCjG8ZRaNGMURJw7BsIST4=", - version = "v1.66.4", + sum = "h1:LATuAqN/shcYAOkv3wl2L4rkaKqkcgTBQjOyYDvcPKI=", + version = "v1.66.6", ) go_repository( 
name = "in_gopkg_jcmturner_aescts_v1", @@ -3832,8 +3847,8 @@ def go_deps(): name = "in_gopkg_yaml_v3", build_file_proto_mode = "disable_global", importpath = "gopkg.in/yaml.v3", - sum = "h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=", - version = "v3.0.0-20210107192922-496545a6307b", + sum = "h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=", + version = "v3.0.1", ) go_repository( name = "io_etcd_go_bbolt", @@ -4137,15 +4152,15 @@ def go_deps(): name = "org_golang_x_exp", build_file_proto_mode = "disable_global", importpath = "golang.org/x/exp", - sum = "h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4=", - version = "v0.0.0-20220428152302-39d4317da171", + sum = "h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=", + version = "v0.0.0-20220722155223-a9213eeb770e", ) go_repository( name = "org_golang_x_exp_typeparams", build_file_proto_mode = "disable", importpath = "golang.org/x/exp/typeparams", - sum = "h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=", - version = "v0.0.0-20220218215828-6cf2b201936e", + sum = "h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic=", + version = "v0.0.0-20220613132600-b0d781184e0d", ) go_repository( @@ -4180,8 +4195,8 @@ def go_deps(): name = "org_golang_x_net", build_file_proto_mode = "disable_global", importpath = "golang.org/x/net", - sum = "h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=", - version = "v0.0.0-20220412020605-290c469a71a5", + sum = "h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=", + version = "v0.0.0-20220722155237-a158d28d115b", ) go_repository( name = "org_golang_x_oauth2", @@ -4194,15 +4209,15 @@ def go_deps(): name = "org_golang_x_sync", build_file_proto_mode = "disable_global", importpath = "golang.org/x/sync", - sum = "h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=", - version = "v0.0.0-20220513210516-0976fa681c29", + sum = "h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=", + version = "v0.0.0-20220722155255-886fb9371eb4", ) go_repository( name = "org_golang_x_sys", build_file_proto_mode = "disable_global", importpath = "golang.org/x/sys", - sum = "h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU=", - version = "v0.0.0-20220622161953-175b2fd9d664", + sum = "h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=", + version = "v0.0.0-20220728004956-3c1f35247d10", ) go_repository( name = "org_golang_x_term", @@ -4229,8 +4244,8 @@ def go_deps(): name = "org_golang_x_tools", build_file_proto_mode = "disable_global", importpath = "golang.org/x/tools", - sum = "h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=", - version = "v0.1.11", + sum = "h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=", + version = "v0.1.12", ) go_repository( name = "org_golang_x_xerrors", diff --git a/Makefile b/Makefile index b103eaf1f0602..43d734d349454 100644 --- a/Makefile +++ b/Makefile @@ -415,7 +415,7 @@ bazel_coverage_test: failpoint-enable bazel_ci_prepare bazel_build: bazel_ci_prepare mkdir -p bin - bazel --output_user_root=/home/jenkins/.tidb/tmp build -k --config=ci //tidb-server/... //br/cmd/... //cmd/... //util/... //dumpling/cmd/... //tidb-binlog/... --//build:with_nogo_flag=true + bazel --output_user_root=/home/jenkins/.tidb/tmp build -k --config=ci //... 
--//build:with_nogo_flag=true cp bazel-out/k8-fastbuild/bin/tidb-server/tidb-server_/tidb-server ./bin cp bazel-out/k8-fastbuild/bin/cmd/importer/importer_/importer ./bin cp bazel-out/k8-fastbuild/bin/tidb-server/tidb-server-check_/tidb-server-check ./bin diff --git a/bindinfo/handle.go b/bindinfo/handle.go index c6c5fe8677359..13e7a1abf50e8 100644 --- a/bindinfo/handle.go +++ b/bindinfo/handle.go @@ -1058,7 +1058,6 @@ func getEvolveParameters(sctx sessionctx.Context) (time.Duration, time.Time, tim startTime, err := time.ParseInLocation(variable.FullDayTimeFormat, startTimeStr, time.UTC) if err != nil { return 0, time.Time{}, time.Time{}, err - } endTime, err := time.ParseInLocation(variable.FullDayTimeFormat, endTimeStr, time.UTC) if err != nil { diff --git a/br/pkg/backup/client.go b/br/pkg/backup/client.go index a5240a3f026e0..c8c74b89c621d 100644 --- a/br/pkg/backup/client.go +++ b/br/pkg/backup/client.go @@ -581,9 +581,8 @@ func (bc *Client) BackupRanges( // The error due to context cancel, stack trace is meaningless, the stack shall be suspended (also clear) if errors.Cause(err) == context.Canceled { return errors.SuspendStack(err) - } else { - return errors.Trace(err) } + return errors.Trace(err) } return nil }) @@ -1072,10 +1071,9 @@ func SendBackup( } logutil.CL(ctx).Error("fail to backup", zap.Uint64("StoreID", storeID), zap.Int("retry", retry)) return berrors.ErrFailedToConnect.Wrap(errBackup).GenWithStack("failed to create backup stream to store %d", storeID) - } else { - // finish backup - break } + // finish backup + break } return nil } diff --git a/br/pkg/backup/schema.go b/br/pkg/backup/schema.go index 385adbad6f55c..d3269980fdbb2 100644 --- a/br/pkg/backup/schema.go +++ b/br/pkg/backup/schema.go @@ -202,7 +202,6 @@ func (s *schemaInfo) encodeToSchema() (*backuppb.Schema, error) { if err != nil { return nil, errors.Trace(err) } - } var statsBytes []byte if s.stats != nil { diff --git a/br/pkg/glue/console_glue_test.go b/br/pkg/glue/console_glue_test.go index d902498729956..61a07ac6fc7ed 100644 --- a/br/pkg/glue/console_glue_test.go +++ b/br/pkg/glue/console_glue_test.go @@ -77,7 +77,6 @@ func testPrettyString(t *testing.T) { } } } - } func testPrettyStringSlicing(t *testing.T) { diff --git a/br/pkg/gluetidb/glue.go b/br/pkg/gluetidb/glue.go index dfe5ae62639bd..459437e33b091 100644 --- a/br/pkg/gluetidb/glue.go +++ b/br/pkg/gluetidb/glue.go @@ -193,7 +193,6 @@ func (gs *tidbSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) schema.Charset = mysql.DefaultCharset } return d.CreateSchemaWithInfo(gs.se, schema, ddl.OnExistIgnore) - } // CreatePlacementPolicy implements glue.Session. @@ -341,7 +340,6 @@ func (s *mockSession) ExecuteInternal(ctx context.Context, sql string, args ...i func (s *mockSession) CreateDatabase(ctx context.Context, schema *model.DBInfo) error { log.Fatal("unimplemented CreateDatabase for mock session") return nil - } // CreatePlacementPolicy implements glue.Session. 
diff --git a/br/pkg/lightning/backend/kv/kv2sql.go b/br/pkg/lightning/backend/kv/kv2sql.go index 1a436bbe822ea..7a06de830416d 100644 --- a/br/pkg/lightning/backend/kv/kv2sql.go +++ b/br/pkg/lightning/backend/kv/kv2sql.go @@ -76,7 +76,7 @@ func (t *TableKVDecoder) IterRawIndexKeys(h kv.Handle, rawRow []byte, fn func([] row[i] = types.GetMinValue(&col.FieldType) } } - if err, _ := evaluateGeneratedColumns(t.se, row, t.tbl.Cols(), t.genCols); err != nil { + if _, err := evaluateGeneratedColumns(t.se, row, t.tbl.Cols(), t.genCols); err != nil { return err } } diff --git a/br/pkg/lightning/backend/kv/sql2kv.go b/br/pkg/lightning/backend/kv/sql2kv.go index 5c310b6ed14f0..ca7a7bba767b5 100644 --- a/br/pkg/lightning/backend/kv/sql2kv.go +++ b/br/pkg/lightning/backend/kv/sql2kv.go @@ -333,17 +333,17 @@ func KvPairsFromRow(row Row) []common.KvPair { return row.(*KvPairs).pairs } -func evaluateGeneratedColumns(se *session, record []types.Datum, cols []*table.Column, genCols []genCol) (err error, errCol *model.ColumnInfo) { +func evaluateGeneratedColumns(se *session, record []types.Datum, cols []*table.Column, genCols []genCol) (errCol *model.ColumnInfo, err error) { mutRow := chunk.MutRowFromDatums(record) for _, gc := range genCols { col := cols[gc.index].ToInfo() evaluated, err := gc.expr.Eval(mutRow.ToRow()) if err != nil { - return err, col + return col, err } value, err := table.CastValue(se, evaluated, col, false, false) if err != nil { - return err, col + return col, err } mutRow.SetDatum(gc.index, value) record[gc.index] = value @@ -426,7 +426,7 @@ func (kvcodec *tableKVEncoder) Encode( } if len(kvcodec.genCols) > 0 { - if err, errCol := evaluateGeneratedColumns(kvcodec.se, record, cols, kvcodec.genCols); err != nil { + if errCol, err := evaluateGeneratedColumns(kvcodec.se, record, cols, kvcodec.genCols); err != nil { return nil, logEvalGenExprFailed(logger, row, errCol, err) } } diff --git a/br/pkg/lightning/backend/kv/sql2kv_test.go b/br/pkg/lightning/backend/kv/sql2kv_test.go index ccd49b7e31806..52f9e7c67e9f1 100644 --- a/br/pkg/lightning/backend/kv/sql2kv_test.go +++ b/br/pkg/lightning/backend/kv/sql2kv_test.go @@ -431,7 +431,6 @@ func TestEncodeMissingAutoValue(t *testing.T) { require.NoError(t, err) require.Equalf(t, pairsExpect, pairs, "test table info: %+v", testTblInfo) require.Equalf(t, rowID, tbl.Allocators(lkv.GetEncoderSe(encoder)).Get(testTblInfo.AllocType).Base(), "test table info: %+v", testTblInfo) - } } diff --git a/br/pkg/lightning/backend/local/engine.go b/br/pkg/lightning/backend/local/engine.go index 90254f3332fd0..ccb0c636295c9 100644 --- a/br/pkg/lightning/backend/local/engine.go +++ b/br/pkg/lightning/backend/local/engine.go @@ -1416,7 +1416,7 @@ func (i dbSSTIngester) mergeSSTs(metas []*sstMeta, dir string) (*sstMeta, error) return nil, err } if key == nil { - return nil, errors.New("all ssts are empty!") + return nil, errors.New("all ssts are empty") } newMeta.minKey = append(newMeta.minKey[:0], key...) 
lastKey := make([]byte, 0) diff --git a/br/pkg/lightning/backend/local/local_test.go b/br/pkg/lightning/backend/local/local_test.go index 8546e7f892d99..041be6d86b49e 100644 --- a/br/pkg/lightning/backend/local/local_test.go +++ b/br/pkg/lightning/backend/local/local_test.go @@ -835,7 +835,6 @@ func testMergeSSTs(t *testing.T, kvs [][]common.KvPair, meta *sstMeta) { func TestMergeSSTs(t *testing.T) { kvs := make([][]common.KvPair, 0, 5) for i := 0; i < 5; i++ { - var pairs []common.KvPair for j := 0; j < 10; j++ { var kv common.KvPair diff --git a/br/pkg/lightning/backend/local/localhelper.go b/br/pkg/lightning/backend/local/localhelper.go index 8e0cd12723612..9768edd5cdc72 100644 --- a/br/pkg/lightning/backend/local/localhelper.go +++ b/br/pkg/lightning/backend/local/localhelper.go @@ -261,21 +261,21 @@ func (local *local) SplitAndScatterRegionByRanges( err = multierr.Append(err, err1) syncLock.Unlock() break - } else { - log.FromContext(ctx).Info("batch split region", zap.Uint64("region_id", splitRegion.Region.Id), - zap.Int("keys", endIdx-startIdx), zap.Binary("firstKey", keys[startIdx]), - zap.Binary("end", keys[endIdx-1])) - slices.SortFunc(newRegions, func(i, j *split.RegionInfo) bool { - return bytes.Compare(i.Region.StartKey, j.Region.StartKey) < 0 - }) - syncLock.Lock() - scatterRegions = append(scatterRegions, newRegions...) - syncLock.Unlock() - // the region with the max start key is the region need to be further split. - if bytes.Compare(splitRegion.Region.StartKey, newRegions[len(newRegions)-1].Region.StartKey) < 0 { - splitRegion = newRegions[len(newRegions)-1] - } } + log.FromContext(ctx).Info("batch split region", zap.Uint64("region_id", splitRegion.Region.Id), + zap.Int("keys", endIdx-startIdx), zap.Binary("firstKey", keys[startIdx]), + zap.Binary("end", keys[endIdx-1])) + slices.SortFunc(newRegions, func(i, j *split.RegionInfo) bool { + return bytes.Compare(i.Region.StartKey, j.Region.StartKey) < 0 + }) + syncLock.Lock() + scatterRegions = append(scatterRegions, newRegions...) + syncLock.Unlock() + // the region with the max start key is the region that needs to be further split. + if bytes.Compare(splitRegion.Region.StartKey, newRegions[len(newRegions)-1].Region.StartKey) < 0 { + splitRegion = newRegions[len(newRegions)-1] + } + batchKeySize = 0 startIdx = endIdx } @@ -319,13 +319,12 @@ func (local *local) SplitAndScatterRegionByRanges( if len(retryKeys) == 0 { break - } else { - slices.SortFunc(retryKeys, func(i, j []byte) bool { - return bytes.Compare(i, j) < 0 - }) - minKey = codec.EncodeBytes([]byte{}, retryKeys[0]) - maxKey = codec.EncodeBytes([]byte{}, nextKey(retryKeys[len(retryKeys)-1])) } + slices.SortFunc(retryKeys, func(i, j []byte) bool { + return bytes.Compare(i, j) < 0 + }) + minKey = codec.EncodeBytes([]byte{}, retryKeys[0]) + maxKey = codec.EncodeBytes([]byte{}, nextKey(retryKeys[len(retryKeys)-1])) } if err != nil { return errors.Trace(err) diff --git a/br/pkg/lightning/backend/local/localhelper_test.go b/br/pkg/lightning/backend/local/localhelper_test.go index fcdc49078a2b4..f3584870b7d96 100644 --- a/br/pkg/lightning/backend/local/localhelper_test.go +++ b/br/pkg/lightning/backend/local/localhelper_test.go @@ -811,7 +811,6 @@ func TestStoreWriteLimiter(t *testing.T) { // In theory, gotTokens should be less than or equal to maxTokens. // But we allow a little error to avoid the test being flaky.
require.LessOrEqual(t, gotTokens, maxTokens+1) - }(uint64(i)) } wg.Wait() } diff --git a/br/pkg/lightning/backend/tidb/tidb.go b/br/pkg/lightning/backend/tidb/tidb.go index 826a14bfeb4a9..8706fa2ab05f0 100644 --- a/br/pkg/lightning/backend/tidb/tidb.go +++ b/br/pkg/lightning/backend/tidb/tidb.go @@ -227,7 +227,6 @@ func (b *targetInfoGetter) FetchRemoteTableModels(ctx context.Context, schemaNam } } } - } return nil }) diff --git a/br/pkg/lightning/checkpoints/checkpoints_test.go b/br/pkg/lightning/checkpoints/checkpoints_test.go index 7676809312d56..61f8d6971718a 100644 --- a/br/pkg/lightning/checkpoints/checkpoints_test.go +++ b/br/pkg/lightning/checkpoints/checkpoints_test.go @@ -303,7 +303,6 @@ func TestCheckpointMarshallUnmarshall(t *testing.T) { } func TestSeparateCompletePath(t *testing.T) { - testCases := []struct { complete string expectFileName string diff --git a/br/pkg/lightning/common/errors.go b/br/pkg/lightning/common/errors.go index 4f1c598a84535..78cc7a4fba844 100644 --- a/br/pkg/lightning/common/errors.go +++ b/br/pkg/lightning/common/errors.go @@ -210,7 +210,6 @@ func NormalizeOrWrapErr(rfcErr *errors.Error, err error, args ...interface{}) er normalizedErr := NormalizeError(err) if berrors.Is(normalizedErr, ErrUnknown) { return rfcErr.Wrap(err).GenWithStackByArgs(args...) - } else { - return normalizedErr } + return normalizedErr } diff --git a/br/pkg/lightning/mydump/parquet_parser.go b/br/pkg/lightning/mydump/parquet_parser.go index 45a0277b360d0..428fc879361af 100644 --- a/br/pkg/lightning/mydump/parquet_parser.go +++ b/br/pkg/lightning/mydump/parquet_parser.go @@ -388,9 +388,8 @@ func getDatumLen(v reflect.Value) int { if v.Kind() == reflect.Ptr { if v.IsNil() { return 0 - } else { - return getDatumLen(v.Elem()) } + return getDatumLen(v.Elem()) } if v.Kind() == reflect.String { return len(v.String()) diff --git a/br/pkg/lightning/mydump/region.go b/br/pkg/lightning/mydump/region.go index b4f2537fb2507..b58cbed215285 100644 --- a/br/pkg/lightning/mydump/region.go +++ b/br/pkg/lightning/mydump/region.go @@ -161,19 +161,19 @@ func MakeTableRegions( for i := 0; i < concurrency; i++ { wg.Add(1) go func() { + defer wg.Done() for info := range fileChan { regions, sizes, err := makeSourceFileRegion(execCtx, meta, info, columns, cfg, ioWorkers, store) select { case resultChan <- fileRegionRes{info: info, regions: regions, sizes: sizes, err: err}: case <-ctx.Done(): - break + return } if err != nil { log.FromContext(ctx).Error("make source file region error", zap.Error(err), zap.String("file_path", info.FileMeta.Path)) break } } - wg.Done() }() } diff --git a/br/pkg/lightning/restore/checksum_test.go b/br/pkg/lightning/restore/checksum_test.go index 32a6b820dbbf8..20acc23fe6be0 100644 --- a/br/pkg/lightning/restore/checksum_test.go +++ b/br/pkg/lightning/restore/checksum_test.go @@ -185,9 +185,8 @@ func TestDoChecksumWithTikv(t *testing.T) { if i >= maxErrorRetryCount { require.Equal(t, mockChecksumKVClientErr, errors.Cause(err)) continue - } else { - require.NoError(t, err) } + require.NoError(t, err) // after checksum, safepoint should be smaller than start ts ts := pdClient.currentSafePoint() diff --git a/br/pkg/lightning/restore/chunk_restore_test.go b/br/pkg/lightning/restore/chunk_restore_test.go index 2a9a42434c77b..7a0d0826c6a07 100644 --- a/br/pkg/lightning/restore/chunk_restore_test.go +++ b/br/pkg/lightning/restore/chunk_restore_test.go @@ -353,7 +353,7 @@ func (s *chunkRestoreSuite) TestEncodeLoopDeliverLimit() { if !ok { break } - count += 1 + count++ if count <= 3 {
require.Len(s.T(), kvs, 1) } diff --git a/br/pkg/lightning/restore/get_pre_info.go b/br/pkg/lightning/restore/get_pre_info.go index 61e9ea6414d6a..7d36539ec4b6d 100644 --- a/br/pkg/lightning/restore/get_pre_info.go +++ b/br/pkg/lightning/restore/get_pre_info.go @@ -520,14 +520,12 @@ func (p *PreRestoreInfoGetterImpl) ReadFirstNRowsByFileMeta(ctx context.Context, if err != nil { if errors.Cause(err) != io.EOF { return nil, nil, errors.Trace(err) - } else { - break } + break } rows = append(rows, parser.LastRow().Row) } return parser.Columns(), rows, nil - } // EstimateSourceDataSize estimates the data size to generate during the import as well as some other sub-information. @@ -598,7 +596,6 @@ func (p *PreRestoreInfoGetterImpl) EstimateSourceDataSize(ctx context.Context) ( HasUnsortedBigTables: (unSortedBigTableCount > 0), } return result, nil - } // sampleDataFromTable samples the source data file to get the extra data ratio for the index @@ -713,7 +710,7 @@ outloop: return 0.0, false, errors.Trace(err) } lastRow := parser.LastRow() - rowCount += 1 + rowCount++ var dataChecksum, indexChecksum verification.KVChecksum kvs, encodeErr := kvEncoder.Encode(logTask.Logger, lastRow.Row, lastRow.RowID, columnPermutation, sampleFile.Path, offset) @@ -725,9 +722,8 @@ outloop: } if rowCount < maxSampleRowCount { continue - } else { - break } + break } if isRowOrdered { kvs.ClassifyAndAppend(&dataKVs, &dataChecksum, &indexKVs, &indexChecksum) @@ -794,8 +790,8 @@ func (p *PreRestoreInfoGetterImpl) FetchRemoteTableModels(ctx context.Context, s // CheckVersionRequirements performs the check whether the target satisfies the version requirements. // It implements the PreRestoreInfoGetter interface. // Mydump database metas are retrieved from the context. -func (g *PreRestoreInfoGetterImpl) CheckVersionRequirements(ctx context.Context) error { - return g.targetInfoGetter.CheckVersionRequirements(ctx) +func (p *PreRestoreInfoGetterImpl) CheckVersionRequirements(ctx context.Context) error { + return p.targetInfoGetter.CheckVersionRequirements(ctx) } // GetTargetSysVariablesForImport gets some important system variables for importing on the target.
diff --git a/br/pkg/lightning/restore/meta_manager.go b/br/pkg/lightning/restore/meta_manager.go index a066f309c766f..659a33c579ef0 100644 --- a/br/pkg/lightning/restore/meta_manager.go +++ b/br/pkg/lightning/restore/meta_manager.go @@ -325,7 +325,6 @@ func (m *dbTableMetaMgr) AllocTableRowIDs(ctx context.Context, rawRowIDMax int64 ck := verify.MakeKVChecksum(remoteCk.TotalBytes, remoteCk.TotalKVs, remoteCk.Checksum) checksum = &ck } - } if checksum != nil { diff --git a/br/pkg/lightning/restore/meta_manager_test.go b/br/pkg/lightning/restore/meta_manager_test.go index 33780114ab01d..0133986fc794b 100644 --- a/br/pkg/lightning/restore/meta_manager_test.go +++ b/br/pkg/lightning/restore/meta_manager_test.go @@ -66,10 +66,7 @@ func newTableRestore(t *testing.T, kvStore kv.Storage) *TableRestore { if err := m.CreateDatabase(&model.DBInfo{ID: dbInfo.ID}); err != nil { return err } - if err := m.CreateTableOrView(dbInfo.ID, ti.Core); err != nil { - return err - } - return nil + return m.CreateTableOrView(dbInfo.ID, ti.Core) }) require.NoError(t, err) @@ -426,7 +423,6 @@ func TestCheckTasksExclusively(t *testing.T) { return newTasks, nil }) require.NoError(t, err) - } type testChecksumMgr struct { diff --git a/br/pkg/lightning/restore/precheck_impl.go b/br/pkg/lightning/restore/precheck_impl.go index d5a2c7fe3ff71..910ecea780681 100644 --- a/br/pkg/lightning/restore/precheck_impl.go +++ b/br/pkg/lightning/restore/precheck_impl.go @@ -589,9 +589,8 @@ func (ci *checkpointCheckItem) checkpointIsValid(ctx context.Context, tableInfo // there is no checkpoint log.FromContext(ctx).Debug("no checkpoint detected", zap.String("table", uniqueName)) return nil, nil - } else { - return nil, errors.Trace(err) } + return nil, errors.Trace(err) } // if checkpoint enable and not missing, we skip the check table empty progress. 
if tableCheckPoint.Status <= checkpoints.CheckpointStatusMissing { @@ -1142,7 +1141,6 @@ loop: case <-gCtx.Done(): break loop } - } } close(ch) diff --git a/br/pkg/lightning/restore/precheck_impl_test.go b/br/pkg/lightning/restore/precheck_impl_test.go index 2fadb9326555a..4a0aedefd325d 100644 --- a/br/pkg/lightning/restore/precheck_impl_test.go +++ b/br/pkg/lightning/restore/precheck_impl_test.go @@ -51,7 +51,6 @@ func (s *precheckImplSuite) SetupTest() { s.cfg = config.NewConfig() s.cfg.TikvImporter.Backend = config.BackendLocal s.Require().NoError(s.setMockImportData(nil)) - } func (s *precheckImplSuite) setMockImportData(mockDataMap map[string]*mock.MockDBSourceData) error { diff --git a/br/pkg/lightning/restore/table_restore.go b/br/pkg/lightning/restore/table_restore.go index b32c5e82b7345..7972c388d9ea8 100644 --- a/br/pkg/lightning/restore/table_restore.go +++ b/br/pkg/lightning/restore/table_restore.go @@ -766,9 +766,8 @@ func (tr *TableRestore) postProcess( if err != nil { tr.logger.Error("collect local duplicate keys failed", log.ShortError(err)) return false, err - } else { - hasDupe = hasLocalDupe } + hasDupe = hasLocalDupe } needChecksum, needRemoteDupe, baseTotalChecksum, err := metaMgr.CheckAndUpdateLocalChecksum(ctx, &localChecksum, hasDupe) @@ -785,9 +784,9 @@ func (tr *TableRestore) postProcess( if e != nil { tr.logger.Error("collect remote duplicate keys failed", log.ShortError(e)) return false, e - } else { - hasDupe = hasDupe || hasRemoteDupe } + hasDupe = hasDupe || hasRemoteDupe + if err = rc.backend.ResolveDuplicateRows(ctx, tr.encTable, tr.tableName, rc.cfg.TikvImporter.DuplicateResolution); err != nil { tr.logger.Error("resolve remote duplicate keys failed", log.ShortError(err)) return false, err diff --git a/br/pkg/restore/client.go b/br/pkg/restore/client.go index 1c2a01909a9c1..b05ec3fcb4b23 100644 --- a/br/pkg/restore/client.go +++ b/br/pkg/restore/client.go @@ -355,7 +355,6 @@ func (rc *Client) InitBackupMeta( backupMeta *backuppb.BackupMeta, backend *backuppb.StorageBackend, reader *metautil.MetaReader) error { - if !backupMeta.IsRawKv { databases, err := utils.LoadBackupTables(c, reader) if err != nil { @@ -732,22 +731,20 @@ func (rc *Client) GoCreateTables( var err error if rc.batchDdlSize > minBatchDdlSize && len(rc.dbPool) > 0 { - err = rc.createTablesInWorkerPool(ctx, dom, tables, newTS, outCh) if err == nil { defer log.Debug("all tables are created") close(outCh) return outCh - // fall back to old create table (sequential create table) } else if utils.FallBack2CreateTable(err) { + // fall back to old create table (sequential create table) log.Info("fall back to the sequential create table") } else { errCh <- err close(outCh) return outCh } - } createOneTable := func(c context.Context, db *DB, t *metautil.Table) error { @@ -755,7 +752,6 @@ func (rc *Client) GoCreateTables( case <-c.Done(): return c.Err() default: - } rt, err := rc.createTable(c, db, dom, t, newTS) if err != nil { @@ -1776,9 +1772,8 @@ func (rc *Client) ReadStreamDataFiles( slices.SortFunc(mFiles, func(i, j *backuppb.DataFileInfo) bool { if i.ResolvedTs > 0 && j.ResolvedTs > 0 { return i.ResolvedTs < j.ResolvedTs - } else { - return i.MaxTs < j.MaxTs } + return i.MaxTs < j.MaxTs }) return dFiles, mFiles, nil } @@ -2123,7 +2118,7 @@ func (rc *Client) RestoreMetaKVFile( return 0, 0, errors.Trace(err) } - kvCount += 1 + kvCount++ size += uint64(len(newEntry.Key) + len(newEntry.Value)) } @@ -2394,5 +2389,4 @@ func TidyOldSchemas(sr *stream.SchemasReplace) *backup.Schemas { } } return 
schemas - } diff --git a/br/pkg/restore/client_test.go b/br/pkg/restore/client_test.go index 64da7f2a9e579..4686871265a0a 100644 --- a/br/pkg/restore/client_test.go +++ b/br/pkg/restore/client_test.go @@ -439,7 +439,7 @@ func (fakeImportCli FakeImporterClient) SetDownloadSpeedLimit( req *import_sstpb.SetDownloadSpeedLimitRequest, ) (*import_sstpb.SetDownloadSpeedLimitResponse, error) { if storeID == SET_SPEED_LIMIT_ERROR { - return nil, fmt.Errorf("storeID:%v ERROR.", storeID) + return nil, fmt.Errorf("storeID:%v ERROR", storeID) } time.Sleep(WORKING_TIME * time.Millisecond) // simulate doing 100 ms work diff --git a/br/pkg/restore/db.go b/br/pkg/restore/db.go index 1e45725f920fc..664f081e56db0 100644 --- a/br/pkg/restore/db.go +++ b/br/pkg/restore/db.go @@ -230,7 +230,6 @@ func (db *DB) restoreSequence(ctx context.Context, table *metautil.Table) error } func (db *DB) CreateTablePostRestore(ctx context.Context, table *metautil.Table, toBeCorrectedTables map[UniqueTableName]bool) error { - var restoreMetaSQL string var err error switch { diff --git a/br/pkg/restore/db_test.go b/br/pkg/restore/db_test.go index b5c52895c0ac1..315085f42f4c6 100644 --- a/br/pkg/restore/db_test.go +++ b/br/pkg/restore/db_test.go @@ -127,7 +127,6 @@ func TestRestoreAutoIncID(t *testing.T) { autoIncID, err = strconv.ParseUint(tk.MustQuery("admin show `\"t\"` next_row_id").Rows()[0][3].(string), 10, 64) require.NoErrorf(t, err, "Error query auto inc id: %s", err) require.Equal(t, uint64(globalAutoID+300), autoIncID) - } func TestCreateTablesInDb(t *testing.T) { @@ -166,7 +165,6 @@ func TestCreateTablesInDb(t *testing.T) { err = db.CreateTables(context.Background(), tables, ddlJobMap, false, nil) require.NoError(t, err) - } func TestFilterDDLJobs(t *testing.T) { diff --git a/br/pkg/restore/split.go b/br/pkg/restore/split.go index 147625dbb241c..e03d3187426cd 100644 --- a/br/pkg/restore/split.go +++ b/br/pkg/restore/split.go @@ -319,7 +319,6 @@ func (rs *RegionSplitter) ScatterRegionsWithBackoffer(ctx context.Context, newRe }), ) } - } // isUnsupportedError checks whether we should fallback to ScatterRegion API when meeting the error. @@ -350,6 +349,7 @@ func (rs *RegionSplitter) ScatterRegions(ctx context.Context, newRegions []*spli rs.waitForSplit(ctx, region.Region.Id) } + // the retry is for the temporary network errors during sending request. err := utils.WithRetry(ctx, func() error { err := rs.client.ScatterRegions(ctx, newRegions) if isUnsupportedError(err) { @@ -364,7 +364,6 @@ func (rs *RegionSplitter) ScatterRegions(ctx context.Context, newRegions []*spli return nil } return err - // the retry is for the temporary network errors during sending request. 
}, &split.ExponentialBackoffer{Attempts: 3, BaseBackoff: 500 * time.Millisecond}) if err != nil { diff --git a/br/pkg/restore/split/client.go b/br/pkg/restore/split/client.go index 6d9dde426f9a3..43d8502f84e44 100644 --- a/br/pkg/restore/split/client.go +++ b/br/pkg/restore/split/client.go @@ -300,7 +300,7 @@ func (c *pdClient) sendSplitRegionRequest( ) (*kvrpcpb.SplitRegionResponse, error) { var splitErrors error for i := 0; i < splitRegionMaxRetryTime; i++ { - retry, result, err := sendSplitRegionRequest(c, ctx, regionInfo, keys, &splitErrors, i) + retry, result, err := sendSplitRegionRequest(ctx, c, regionInfo, keys, &splitErrors, i) if retry { continue } @@ -315,7 +315,7 @@ func (c *pdClient) sendSplitRegionRequest( return nil, errors.Trace(splitErrors) } -func sendSplitRegionRequest(c *pdClient, ctx context.Context, regionInfo *RegionInfo, keys [][]byte, splitErrors *error, retry int) (bool, *kvrpcpb.SplitRegionResponse, error) { +func sendSplitRegionRequest(ctx context.Context, c *pdClient, regionInfo *RegionInfo, keys [][]byte, splitErrors *error, retry int) (bool, *kvrpcpb.SplitRegionResponse, error) { var peer *metapb.Peer // scanRegions may return empty Leader in https://github.com/tikv/pd/blob/v4.0.8/server/grpc_service.go#L524 // so we also need to check Leader.Id != 0 diff --git a/br/pkg/restore/split_test.go b/br/pkg/restore/split_test.go index 8e74bd30782cd..3cc635021d327 100644 --- a/br/pkg/restore/split_test.go +++ b/br/pkg/restore/split_test.go @@ -87,7 +87,6 @@ func (c *TestClient) ScatterRegions(ctx context.Context, regionInfo []*split.Reg delete(regions, id) } err = multierr.Append(err, splitErr) - } } return nil @@ -315,7 +314,6 @@ func TestScatterFinishInTime(t *testing.T) { regionSplitter.ScatterRegionsWithBackoffer(ctx, regionInfos, assertRetryLessThan(t, 40)) - } // region: [, aay), [aay, bba), [bba, bbh), [bbh, cca), [cca, ) diff --git a/br/pkg/restore/stream_metas.go b/br/pkg/restore/stream_metas.go index ed31965682c00..b4c2c85e76d20 100644 --- a/br/pkg/restore/stream_metas.go +++ b/br/pkg/restore/stream_metas.go @@ -196,10 +196,7 @@ func swapAndOverrideFile(ctx context.Context, s storage.ExternalStorage, path st if err := s.WriteFile(ctx, path, data); err != nil { return err } - if err := s.DeleteFile(ctx, backup); err != nil { - return err - } - return nil + return s.DeleteFile(ctx, backup) } const ( diff --git a/br/pkg/storage/azblob_test.go b/br/pkg/storage/azblob_test.go index 03b0618b04320..c099037ea51b2 100644 --- a/br/pkg/storage/azblob_test.go +++ b/br/pkg/storage/azblob_test.go @@ -297,5 +297,4 @@ func TestNewAzblobStorage(t *testing.T) { require.Equal(t, "user", b.GetAccountName()) require.Equal(t, "http://127.0.0.1:1000", b.serviceURL) } - } diff --git a/br/pkg/storage/gcs_test.go b/br/pkg/storage/gcs_test.go index 7a324ee6df7f5..5801adccf04b7 100644 --- a/br/pkg/storage/gcs_test.go +++ b/br/pkg/storage/gcs_test.go @@ -158,7 +158,7 @@ func TestGCS(t *testing.T) { // test 1003 files var totalSize int64 = 0 - for i := 0; i < 1000; i += 1 { + for i := 0; i < 1000; i++ { err = stg.WriteFile(ctx, fmt.Sprintf("f%d", i), []byte("data")) require.NoError(t, err) } @@ -176,7 +176,7 @@ func TestGCS(t *testing.T) { require.True(t, ok) _, ok = filesSet["key2"] require.True(t, ok) - for i := 0; i < 1000; i += 1 { + for i := 0; i < 1000; i++ { _, ok = filesSet[fmt.Sprintf("f%d", i)] require.True(t, ok) } diff --git a/br/pkg/stream/rewrite_meta_rawkv_test.go b/br/pkg/stream/rewrite_meta_rawkv_test.go index e49cbd3860585..fcf8a19524c48 100644 ---
a/br/pkg/stream/rewrite_meta_rawkv_test.go +++ b/br/pkg/stream/rewrite_meta_rawkv_test.go @@ -92,7 +92,6 @@ func TestRewriteValueForDB(t *testing.T) { } func TestRewriteValueForTable(t *testing.T) { - var ( dbId int64 = 40 tableID int64 = 100 diff --git a/br/pkg/stream/stream_mgr.go b/br/pkg/stream/stream_mgr.go index f0c52f533a275..5608b2cd66151 100644 --- a/br/pkg/stream/stream_mgr.go +++ b/br/pkg/stream/stream_mgr.go @@ -142,9 +142,8 @@ func BuildObserveDataRanges( ) ([]kv.KeyRange, error) { if len(filterStr) == 1 && filterStr[0] == string("*.*") { return buildObserverAllRange(), nil - } else { - return buildObserveTableRanges(storage, tableFilter, backupTS) } + return buildObserveTableRanges(storage, tableFilter, backupTS) } // BuildObserveMetaRange specifies build key ranges to observe meta KV(contains all of metas) @@ -182,9 +181,8 @@ func FastUnmarshalMetaData( if err != nil { if !strings.HasSuffix(readPath, ".meta") { return nil - } else { - return err } + return err } return fn(readPath, m) }) diff --git a/br/pkg/stream/stream_status.go b/br/pkg/stream/stream_status.go index 23ef2d2bdc6f7..3792380d99f3c 100644 --- a/br/pkg/stream/stream_status.go +++ b/br/pkg/stream/stream_status.go @@ -158,7 +158,6 @@ func (p *printByTable) addCheckpoints(task *TaskStatus, table *glue.Table, forma } } } - } func (p *printByTable) PrintTasks() { diff --git a/br/pkg/streamhelper/advancer.go b/br/pkg/streamhelper/advancer.go index a74ee9b623c26..89abd721242c8 100644 --- a/br/pkg/streamhelper/advancer.go +++ b/br/pkg/streamhelper/advancer.go @@ -478,9 +478,8 @@ func (c *CheckpointAdvancer) onConsistencyCheckTick(s *updateSmallTree) error { log.Error("consistency check failed! log backup may lose data! rolling back to full scan for saving.", logutil.ShortError(err)) c.state = &fullScan{} return err - } else { - log.Debug("consistency check passed.") } + log.Debug("consistency check passed.") s.consistencyCheckTick = config.DefaultConsistencyCheckTick return nil } diff --git a/br/pkg/streamhelper/advancer_cliext.go b/br/pkg/streamhelper/advancer_cliext.go index b9a6308327199..6ac9bfc5694c8 100644 --- a/br/pkg/streamhelper/advancer_cliext.go +++ b/br/pkg/streamhelper/advancer_cliext.go @@ -149,7 +149,7 @@ func (t AdvancerExt) getFullTasksAsEvent(ctx context.Context) ([]TaskEvent, int6 te := TaskEvent{ Type: EventAdd, Name: task.Info.Name, - Info: &task.Info, + Info: &(task.Info), } events = append(events, te) } diff --git a/br/pkg/streamhelper/basic_lib_for_test.go b/br/pkg/streamhelper/basic_lib_for_test.go index 7877077a03312..e2beb6e36cf3d 100644 --- a/br/pkg/streamhelper/basic_lib_for_test.go +++ b/br/pkg/streamhelper/basic_lib_for_test.go @@ -88,23 +88,23 @@ func overlaps(a, b kv.KeyRange) bool { return bytes.Compare(a.StartKey, b.EndKey) < 0 && bytes.Compare(b.StartKey, a.EndKey) < 0 } -func (f *region) splitAt(newID uint64, k string) *region { +func (r *region) splitAt(newID uint64, k string) *region { newRegion := ®ion{ - rng: kv.KeyRange{StartKey: []byte(k), EndKey: f.rng.EndKey}, - leader: f.leader, - epoch: f.epoch + 1, + rng: kv.KeyRange{StartKey: []byte(k), EndKey: r.rng.EndKey}, + leader: r.leader, + epoch: r.epoch + 1, id: newID, - checkpoint: f.checkpoint, - fsim: f.fsim.fork(), + checkpoint: r.checkpoint, + fsim: r.fsim.fork(), } - f.rng.EndKey = []byte(k) - f.epoch += 1 - f.fsim = f.fsim.fork() + r.rng.EndKey = []byte(k) + r.epoch += 1 + r.fsim = r.fsim.fork() return newRegion } -func (f *region) flush() { - f.fsim.flushedEpoch = f.epoch +func (r *region) flush() { + 
r.fsim.flushedEpoch = r.epoch } func (f *fakeStore) GetLastFlushTSOfRegion(ctx context.Context, in *logbackup.GetLastFlushTSOfRegionRequest, opts ...grpc.CallOption) (*logbackup.GetLastFlushTSOfRegionResponse, error) { @@ -357,10 +357,10 @@ func (r *region) String() string { return fmt.Sprintf("%d(%d):[%s,%s);%dL%d", r.id, r.epoch, hex.EncodeToString(r.rng.StartKey), hex.EncodeToString(r.rng.EndKey), r.checkpoint, r.leader) } -func (s *fakeStore) String() string { +func (f *fakeStore) String() string { buf := new(strings.Builder) - fmt.Fprintf(buf, "%d: ", s.id) - for _, r := range s.regions { + fmt.Fprintf(buf, "%d: ", f.id) + for _, r := range f.regions { fmt.Fprintf(buf, "%s ", r) } return buf.String() @@ -372,9 +372,9 @@ func (f *fakeCluster) flushAll() { } } -func (s *fakeStore) flush() { - for _, r := range s.regions { - if r.leader == s.id { +func (f *fakeStore) flush() { + for _, r := range f.regions { + if r.leader == f.id { r.flush() } } diff --git a/br/pkg/streamhelper/regioniter_test.go b/br/pkg/streamhelper/regioniter_test.go index 42d4445f70679..04ccc04da8a66 100644 --- a/br/pkg/streamhelper/regioniter_test.go +++ b/br/pkg/streamhelper/regioniter_test.go @@ -86,7 +86,6 @@ func makeSubrangeRegions(keys ...string) constantRegions { regions = append(regions, region) } return constantRegions(regions) - } func useRegions(keys ...string) constantRegions { diff --git a/br/pkg/streamhelper/tsheap_test.go b/br/pkg/streamhelper/tsheap_test.go index 6453ee36a60ab..461e7f76c87e7 100644 --- a/br/pkg/streamhelper/tsheap_test.go +++ b/br/pkg/streamhelper/tsheap_test.go @@ -113,7 +113,6 @@ func TestMergeRanges(t *testing.T) { }) require.Equal(t, c.expected, result, "case = %d", i) } - } func TestInsertRanges(t *testing.T) { diff --git a/br/pkg/task/restore.go b/br/pkg/task/restore.go index f890364bf67b4..4cec79e7acf62 100644 --- a/br/pkg/task/restore.go +++ b/br/pkg/task/restore.go @@ -359,10 +359,9 @@ func CheckNewCollationEnable( "you can use \"show config WHERE name='new_collations_enabled_on_first_bootstrap';\" to manually check the config. 
"+ "if you ensure the config 'new_collations_enabled_on_first_bootstrap' in backup cluster is as same as restore cluster, "+ "use --check-requirements=false to skip this check") - } else { - log.Warn("the config 'new_collations_enabled_on_first_bootstrap' is not in backupmeta") - return nil } + log.Warn("the config 'new_collations_enabled_on_first_bootstrap' is not in backupmeta") + return nil } se, err := g.CreateSession(storage) diff --git a/br/pkg/task/stream.go b/br/pkg/task/stream.go index 07b1c9d97b8ff..3c3d20967f58e 100644 --- a/br/pkg/task/stream.go +++ b/br/pkg/task/stream.go @@ -1531,9 +1531,8 @@ func ShiftTS(startTS uint64) uint64 { shiftPhysical := physical - streamShiftDuration.Milliseconds() if shiftPhysical < 0 { return 0 - } else { - return oracle.ComposeTS(shiftPhysical, logical) } + return oracle.ComposeTS(shiftPhysical, logical) } func buildPauseSafePointName(taskName string) string { diff --git a/br/pkg/utils/backoff_test.go b/br/pkg/utils/backoff_test.go index 78f329708211b..1ddff4b7d7ca7 100644 --- a/br/pkg/utils/backoff_test.go +++ b/br/pkg/utils/backoff_test.go @@ -131,9 +131,8 @@ func TestNewImportSSTBackofferWithSucess(t *testing.T) { defer func() { counter++ }() if counter == 15 { return nil - } else { - return berrors.ErrKVDownloadFailed } + return berrors.ErrKVDownloadFailed }, backoffer) require.Equal(t, 16, counter) require.NoError(t, err) @@ -146,10 +145,8 @@ func TestNewDownloadSSTBackofferWithCancel(t *testing.T) { defer func() { counter++ }() if counter == 3 { return context.Canceled - } else { - return berrors.ErrKVIngestFailed } - + return berrors.ErrKVIngestFailed }, backoffer) require.Equal(t, 4, counter) require.Equal(t, []error{ diff --git a/br/pkg/version/version_test.go b/br/pkg/version/version_test.go index 9527836fa500b..12a95eaaa4918 100644 --- a/br/pkg/version/version_test.go +++ b/br/pkg/version/version_test.go @@ -416,7 +416,6 @@ func TestCheckVersion(t *testing.T) { } func versionEqualCheck(source *semver.Version, target *semver.Version) (result bool) { - if source == nil || target == nil { return target == source } @@ -536,5 +535,4 @@ Check Table Before Drop: false` _, err = FetchVersion(ctx, db) require.Error(t, err) require.Regexp(t, "mock failure$", err.Error()) - } diff --git a/build/BUILD.bazel b/build/BUILD.bazel index 2da990828fcd3..b61f149d8fca8 100644 --- a/build/BUILD.bazel +++ b/build/BUILD.bazel @@ -134,6 +134,7 @@ nogo( ] + staticcheck_analyzers(STATICHECK_ANALYZERS) + select({ "//build:with_nogo": [ + "//build/linter/allrevive:allrevive", "//build/linter/errcheck:errcheck", "//build/linter/revive:revive", ], diff --git a/build/linter/allrevive/BUILD.bazel b/build/linter/allrevive/BUILD.bazel new file mode 100644 index 0000000000000..b13604af6eef3 --- /dev/null +++ b/build/linter/allrevive/BUILD.bazel @@ -0,0 +1,17 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library") + +go_library( + name = "allrevive", + srcs = ["analyzer.go"], + importpath = "github.com/pingcap/tidb/build/linter/allrevive", + visibility = ["//visibility:public"], + deps = [ + "//build/linter/util", + "@com_github_mgechev_revive//config", + "@com_github_mgechev_revive//lint", + "@com_github_mgechev_revive//rule", + "@com_github_pingcap_log//:log", + "@org_golang_x_tools//go/analysis", + "@org_uber_go_zap//:zap", + ], +) diff --git a/build/linter/allrevive/analyzer.go b/build/linter/allrevive/analyzer.go new file mode 100644 index 0000000000000..efdccf67d21a7 --- /dev/null +++ b/build/linter/allrevive/analyzer.go @@ -0,0 +1,198 @@ +// Copyright 2022 
PingCAP, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package allrevive + +import ( + "encoding/json" + "fmt" + "go/token" + "os" + + "github.com/mgechev/revive/config" + "github.com/mgechev/revive/lint" + "github.com/mgechev/revive/rule" + "github.com/pingcap/log" + "github.com/pingcap/tidb/build/linter/util" + "go.uber.org/zap" + "golang.org/x/tools/go/analysis" +) + +// Analyzer is the analyzer struct of gofmt. +var Analyzer = &analysis.Analyzer{ + Name: "all_revive", + Doc: "~6x faster, stricter, configurable, extensible, and beautiful drop-in replacement for golint", + Run: run, +} + +func init() { + util.SkipAnalyzer(Analyzer) +} + +// jsonObject defines a JSON object of a failure +type jsonObject struct { + Severity lint.Severity + lint.Failure `json:",inline"` +} + +var defaultRules = []lint.Rule{ + //&rule.VarDeclarationsRule{}, + //&rule.PackageCommentsRule{}, + //&rule.DotImportsRule{}, + &rule.BlankImportsRule{}, + //&rule.ExportedRule{}, + //&rule.VarNamingRule{}, + &rule.IndentErrorFlowRule{}, + &rule.RangeRule{}, + &rule.ErrorfRule{}, + &rule.ErrorNamingRule{}, + &rule.ErrorStringsRule{}, + &rule.ReceiverNamingRule{}, + //&rule.IncrementDecrementRule{}, + &rule.ErrorReturnRule{}, + //&rule.UnexportedReturnRule{}, + &rule.TimeNamingRule{}, + //&rule.ContextKeysType{}, + &rule.ContextAsArgumentRule{}, +} + +var allRules = append([]lint.Rule{ + //&rule.ArgumentsLimitRule{}, + //&rule.CyclomaticRule{}, + //&rule.FileHeaderRule{}, + //&rule.EmptyBlockRule{}, + &rule.SuperfluousElseRule{}, + //&rule.ConfusingNamingRule{}, + &rule.GetReturnRule{}, + &rule.ModifiesParamRule{}, + //&rule.ConfusingResultsRule{}, + //&rule.DeepExitRule{}, + //&rule.UnusedParamRule{}, + &rule.UnreachableCodeRule{}, + //&rule.AddConstantRule{}, + //&rule.FlagParamRule{}, + //&rule.UnnecessaryStmtRule{}, + //&rule.StructTagRule{}, + //&rule.ModifiesValRecRule{}, + &rule.ConstantLogicalExprRule{}, + &rule.BoolLiteralRule{}, + //&rule.RedefinesBuiltinIDRule{}, + &rule.ImportsBlacklistRule{}, + //&rule.FunctionResultsLimitRule{}, + //&rule.MaxPublicStructsRule{}, + &rule.RangeValInClosureRule{}, + &rule.RangeValAddress{}, + &rule.WaitGroupByValueRule{}, + &rule.AtomicRule{}, + &rule.EmptyLinesRule{}, + //&rule.LineLengthLimitRule{}, + //&rule.CallToGCRule{}, + &rule.DuplicatedImportsRule{}, + //&rule.ImportShadowingRule{}, + //&rule.BareReturnRule{}, + //&rule.UnusedReceiverRule{}, + //&rule.UnhandledErrorRule{}, + //&rule.CognitiveComplexityRule{}, + &rule.StringOfIntRule{}, + &rule.StringFormatRule{}, + //&rule.EarlyReturnRule{}, + &rule.UnconditionalRecursionRule{}, + &rule.IdenticalBranchesRule{}, + &rule.DeferRule{}, + //&rule.UnexportedNamingRule{}, + //&rule.FunctionLength{}, + //&rule.NestedStructs{}, + &rule.IfReturnRule{}, + //&rule.UselessBreak{}, + &rule.TimeEqualRule{}, + //&rule.BannedCharsRule{}, + &rule.OptimizeOperandsOrderRule{}, +}, defaultRules...) 
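The commented-out entries above record revive rules that are deliberately left disabled for now; only the uncommented ones are enforced. To make the wiring concrete, here is a minimal, hedged sketch (not part of the patch) of driving revive's lint API directly with a single rule, using the same calls that run() below relies on; the input file name "example.go" is hypothetical.

    package main

    import (
        "fmt"
        "os"

        "github.com/mgechev/revive/config"
        "github.com/mgechev/revive/lint"
        "github.com/mgechev/revive/rule"
    )

    func main() {
        conf := lint.Config{
            Confidence: 0.8,
            Severity:   "error",
            Rules:      map[string]lint.RuleConfig{},
        }
        // Enable one rule; per-rule options travel in RuleConfig.Arguments,
        // as the "defer" entry shows (same shape as in run() below).
        conf.Rules[(&rule.IndentErrorFlowRule{}).Name()] = lint.RuleConfig{}
        conf.Rules["defer"] = lint.RuleConfig{
            Arguments: []interface{}{[]interface{}{"loop"}},
        }

        rules, err := config.GetLintingRules(&conf, []lint.Rule{})
        if err != nil {
            panic(err)
        }

        linter := lint.New(os.ReadFile, 1024)
        failures, err := linter.Lint([][]string{{"example.go"}}, rules, conf)
        if err != nil {
            panic(err)
        }
        for f := range failures {
            if f.Confidence < conf.Confidence {
                continue
            }
            fmt.Printf("%s: %s\n", f.Position.Start, f.Failure)
        }
    }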
+ +func run(pass *analysis.Pass) (any, error) { + files := make([]string, 0, len(pass.Files)) + for _, file := range pass.Files { + files = append(files, pass.Fset.PositionFor(file.Pos(), false).Filename) + } + packages := [][]string{files} + + revive := lint.New(os.ReadFile, 1024) + conf := lint.Config{ + IgnoreGeneratedHeader: false, + Confidence: 0.8, + Severity: "error", + ErrorCode: -1, + WarningCode: -1, + Rules: map[string]lint.RuleConfig{}, + } + for _, r := range allRules { + conf.Rules[r.Name()] = lint.RuleConfig{} + } + conf.Rules["defer"] = lint.RuleConfig{ + Arguments: []interface{}{[]interface{}{"loop", "method-call", "immediate-recover", "return"}}, + } + + lintingRules, err := config.GetLintingRules(&conf, []lint.Rule{}) + if err != nil { + return nil, err + } + + failures, err := revive.Lint(packages, lintingRules, conf) + if err != nil { + return nil, err + } + + formatChan := make(chan lint.Failure) + exitChan := make(chan bool) + + formatter, err := config.GetFormatter("json") + if err != nil { + return nil, err + } + var output string + go func() { + output, err = formatter.Format(formatChan, conf) + if err != nil { + log.Error("Format error", zap.Error(err)) + } + exitChan <- true + }() + + for f := range failures { + if f.Confidence < conf.Confidence { + continue + } + + formatChan <- f + } + + close(formatChan) + <-exitChan + + var results []jsonObject + err = json.Unmarshal([]byte(output), &results) + if err != nil { + return nil, err + } + for i := range results { + res := &results[i] + text := fmt.Sprintf("%s: %s", res.RuleName, res.Failure.Failure) + fileContent, tf, err := util.ReadFile(pass.Fset, res.Position.Start.Filename) + if err != nil { + panic(err) + } + pass.Reportf(token.Pos(tf.Base()+util.FindOffset(string(fileContent), res.Position.Start.Line, res.Position.Start.Column)), text) + } + return nil, nil +} diff --git a/build/linter/gci/BUILD.bazel b/build/linter/gci/BUILD.bazel index 9318955d516bd..b1495627c85a7 100644 --- a/build/linter/gci/BUILD.bazel +++ b/build/linter/gci/BUILD.bazel @@ -6,7 +6,7 @@ go_library( importpath = "github.com/pingcap/tidb/build/linter/gci", visibility = ["//visibility:public"], deps = [ - "@com_github_daixiang0_gci//pkg/configuration", + "@com_github_daixiang0_gci//pkg/config", "@com_github_daixiang0_gci//pkg/gci", "@org_golang_x_tools//go/analysis", ], diff --git a/build/linter/gci/analysis.go b/build/linter/gci/analysis.go index 6ac0854302160..a7ad2f9e32bd4 100644 --- a/build/linter/gci/analysis.go +++ b/build/linter/gci/analysis.go @@ -18,7 +18,7 @@ import ( "fmt" "sync" - "github.com/daixiang0/gci/pkg/configuration" + "github.com/daixiang0/gci/pkg/config" "github.com/daixiang0/gci/pkg/gci" "golang.org/x/tools/go/analysis" ) @@ -36,11 +36,13 @@ func run(pass *analysis.Pass) (any, error) { pos := pass.Fset.PositionFor(f.Pos(), false) fileNames = append(fileNames, pos.Filename) } - var rawCfg gci.GciStringConfiguration - rawCfg.Cfg = configuration.FormatterConfiguration{ - NoInlineComments: false, - NoPrefixComments: false, - Debug: false, + rawCfg := config.YamlConfig{ + Cfg: config.BoolConfig{ + NoInlineComments: false, + NoPrefixComments: false, + Debug: false, + SkipGenerated: true, + }, } cfg, _ := rawCfg.Parse() var diffs []string diff --git a/build/linter/revive/analyzer.go b/build/linter/revive/analyzer.go index c1d8c02c27b66..0ebf311492e5c 100644 --- a/build/linter/revive/analyzer.go +++ b/build/linter/revive/analyzer.go @@ -139,6 +139,9 @@ func run(pass *analysis.Pass) (any, error) { for _, r := range allRules { 
conf.Rules[r.Name()] = lint.RuleConfig{} } + conf.Rules["defer"] = lint.RuleConfig{ + Arguments: []interface{}{[]interface{}{"loop", "method-call", "immediate-recover", "return"}}, + } lintingRules, err := config.GetLintingRules(&conf, []lint.Rule{}) if err != nil { return nil, err diff --git a/build/nogo_config.json b/build/nogo_config.json index 564622992c361..8bf9876601a4e 100644 --- a/build/nogo_config.json +++ b/build/nogo_config.json @@ -1,4 +1,12 @@ { + "all_revive": { + "exclude_files": { + "/external/": "no need to vet third party code", + ".*_generated\\.go$": "ignore generated code", + "/rules_go_work-*": "ignore generated code", + ".*_/testmain\\.go$": "ignore code" + } + }, "asciicheck": { "exclude_files": { "/external/": "no need to vet third party code", @@ -151,7 +159,9 @@ "/rules_go_work-*": "ignore generated code", ".*test_/testmain\\.go$": "ignore generated code", ".*failpoint_binding__.go$": "ignore generated code", - "util/printer/printer.go": "ignore util/printer code" + "util/printer/printer.go": "ignore util/printer code", + "parser/parser.go": "ignore parser code", + "parser/hintparser.go": "ignore parser/hintparser code" } }, "gosec": { @@ -244,6 +254,7 @@ ".*_generated\\.go$": "ignore generated code" }, "only_files": { + "ddl/index.go": "ddl/index code", "planner/core/rule_partition_eliminate.go": "planner/core/rule_partition_eliminate code", "distsql/": "ignore distsql code", "dumpling/export": "dumpling/export code", diff --git a/cmd/ddltest/ddl_test.go b/cmd/ddltest/ddl_test.go index bbc16fbc983a6..6fa05c1ddec21 100644 --- a/cmd/ddltest/ddl_test.go +++ b/cmd/ddltest/ddl_test.go @@ -1032,7 +1032,6 @@ func TestSimpleDelete(t *testing.T) { for _, test := range tests { tblName := test.name t.Run(test.name, func(t *testing.T) { - workerNum := 10 rowCount := 1000 batch := rowCount / workerNum @@ -1083,7 +1082,6 @@ func TestSimpleDelete(t *testing.T) { for _, test := range tests { tblName := test.name t.Run(test.name, func(t *testing.T) { - var mu sync.Mutex keysMap := make(map[int64]int64) diff --git a/cmd/ddltest/index_test.go b/cmd/ddltest/index_test.go index 7472ddca22a8a..7d3206197eba4 100644 --- a/cmd/ddltest/index_test.go +++ b/cmd/ddltest/index_test.go @@ -87,6 +87,7 @@ func TestIndex(t *testing.T) { done := s.runDDL(col.Query) ticker := time.NewTicker(time.Duration(*lease) * time.Second / 2) + //nolint:all_revive,revive defer ticker.Stop() LOOP: for { diff --git a/cmd/explaintest/main.go b/cmd/explaintest/main.go index 02f6fd6f2f90b..b91eb344fd4f5 100644 --- a/cmd/explaintest/main.go +++ b/cmd/explaintest/main.go @@ -250,10 +250,12 @@ func (t *tester) parserErrorHandle(query query, err error) error { gotBuf := t.buf.Bytes()[offset:] buf := make([]byte, t.buf.Len()-offset) if _, err = t.resultFD.ReadAt(buf, int64(offset)); err != nil { + //nolint: all_revive,revive return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we got \n%s\nbut read result err %s", query.Query, query.Line, gotBuf, err)) } if !bytes.Equal(gotBuf, buf) { + //nolint: all_revive,revive return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we need(%v):\n%s\nbut got(%v):\n%s\n", query.Query, query.Line, len(buf), buf, len(gotBuf), gotBuf)) } } @@ -391,6 +393,7 @@ func (t *tester) execute(query query) error { return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we got \n%s\nbut read result err %s", qText, query.Line, gotBuf, err)) } if !skipCheckErrMsg && !bytes.Equal(gotBuf, buf) { + //nolint: all_revive,revive return errors.Trace(errors.Errorf("run \"%v\" at line %d 
err, we need:\n%s\nbut got:\n%s\n", qText, query.Line, buf, gotBuf)) } t.outputLen = t.buf.Len() @@ -606,9 +609,11 @@ func (t *tester) checkLastResult() error { } buf := make([]byte, int(size)-t.outputLen+len(t.lastResult)) if _, err = t.resultFD.ReadAt(buf, int64(t.outputLen-len(t.lastResult))); !(err == nil || err == io.EOF) { + //nolint: all_revive,revive return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we got \n%s\nbut read result err %s", t.lastText, t.lastQuery.Line, t.lastResult, err)) } if !bytes.Equal(t.lastResult, buf) { + //nolint: all_revive,revive return errors.Trace(errors.Errorf("run \"%v\" at line %d err, we need:\n%s\nbut got:\n%s\n", t.lastText, t.lastQuery.Line, buf, t.lastResult)) } return nil diff --git a/config/config_test.go b/config/config_test.go index 27d2ebbec2111..1471ff719b4e6 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -164,7 +164,6 @@ disable-timestamp = true enable-error-stack = false disable-error-stack = false `, nbFalse, nbUnset, nbUnset, nbUnset, false, true) - } func TestRemovedVariableCheck(t *testing.T) { diff --git a/ddl/backfilling.go b/ddl/backfilling.go index 432de43f77aac..83102dc50c512 100644 --- a/ddl/backfilling.go +++ b/ddl/backfilling.go @@ -648,7 +648,7 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba switch bfWorkerType { case typeAddIndexWorker: - idxWorker := newAddIndexWorker(sessCtx, w, i, t, indexInfo, decodeColMap, reorgInfo, jc) + idxWorker := newAddIndexWorker(sessCtx, i, t, indexInfo, decodeColMap, reorgInfo, jc) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker, job) @@ -660,7 +660,7 @@ func (w *worker) writePhysicalTableRecord(t table.PhysicalTable, bfWorkerType ba backfillWorkers = append(backfillWorkers, updateWorker.backfillWorker) go updateWorker.backfillWorker.run(reorgInfo.d, updateWorker, job) case typeCleanUpIndexWorker: - idxWorker := newCleanUpIndexWorker(sessCtx, w, i, t, decodeColMap, reorgInfo, jc) + idxWorker := newCleanUpIndexWorker(sessCtx, i, t, decodeColMap, reorgInfo, jc) idxWorker.priority = job.Priority backfillWorkers = append(backfillWorkers, idxWorker.backfillWorker) go idxWorker.backfillWorker.run(reorgInfo.d, idxWorker, job) diff --git a/ddl/db_partition_test.go b/ddl/db_partition_test.go index ef86a903aa78a..158f2f2aa4a15 100644 --- a/ddl/db_partition_test.go +++ b/ddl/db_partition_test.go @@ -31,7 +31,6 @@ import ( "github.com/pingcap/tidb/ddl/testutil" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/errno" - tmysql "github.com/pingcap/tidb/errno" "github.com/pingcap/tidb/kv" "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/parser/model" @@ -128,7 +127,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p2 values less than (1996), partition p2 values less than (2001) );` - tk.MustGetErrCode(sql1, tmysql.ErrSameNamePartition) + tk.MustGetErrCode(sql1, errno.ErrSameNamePartition) sql2 := `create table employees ( id int not null, @@ -139,7 +138,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p2 values less than (1996), partition p3 values less than (2001) );` - tk.MustGetErrCode(sql2, tmysql.ErrRangeNotIncreasing) + tk.MustGetErrCode(sql2, errno.ErrRangeNotIncreasing) sql3 := `create table employees ( id int not null, @@ -150,7 +149,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p2 values less than maxvalue, partition p3 values less than (2001) );` - 
tk.MustGetErrCode(sql3, tmysql.ErrPartitionMaxvalue) + tk.MustGetErrCode(sql3, errno.ErrPartitionMaxvalue) sql4 := `create table t4 ( a int not null, @@ -161,7 +160,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p2 values less than (1991), partition p3 values less than (1995) );` - tk.MustGetErrCode(sql4, tmysql.ErrPartitionMaxvalue) + tk.MustGetErrCode(sql4, errno.ErrPartitionMaxvalue) tk.MustExec(`CREATE TABLE rc ( a INT NOT NULL, @@ -182,7 +181,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition by range( hired ) ( partition p0 values less than (6 , 10) );` - tk.MustGetErrCode(sql6, tmysql.ErrTooManyValues) + tk.MustGetErrCode(sql6, errno.ErrTooManyValues) sql7 := `create table t7 ( a int not null, @@ -195,7 +194,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p4 values less than (1995), partition p5 values less than maxvalue );` - tk.MustGetErrCode(sql7, tmysql.ErrPartitionMaxvalue) + tk.MustGetErrCode(sql7, errno.ErrPartitionMaxvalue) sql18 := `create table t8 ( a int not null, @@ -214,7 +213,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p0 values less than (2), partition p1 values less than (6) );` - tk.MustGetErrCode(sql9, tmysql.ErrPartitionFunctionIsNotAllowed) + tk.MustGetErrCode(sql9, errno.ErrPartitionFunctionIsNotAllowed) tk.MustGetDBError(`CREATE TABLE t9 ( a INT NOT NULL, @@ -227,7 +226,7 @@ func TestCreateTableWithPartition(t *testing.T) { partition p3 values less than (20) );`, dbterror.ErrRangeNotIncreasing) - tk.MustGetErrCode(`create TABLE t10 (c1 int,c2 int) partition by range(c1 / c2 ) (partition p0 values less than (2));`, tmysql.ErrPartitionFunctionIsNotAllowed) + tk.MustGetErrCode(`create TABLE t10 (c1 int,c2 int) partition by range(c1 / c2 ) (partition p0 values less than (2));`, errno.ErrPartitionFunctionIsNotAllowed) tk.MustExec(`create TABLE t11 (c1 int,c2 int) partition by range(c1 div c2 ) (partition p0 values less than (2));`) tk.MustExec(`create TABLE t12 (c1 int,c2 int) partition by range(c1 + c2 ) (partition p0 values less than (2));`) @@ -236,17 +235,17 @@ func TestCreateTableWithPartition(t *testing.T) { tk.MustExec(`create TABLE t15 (c1 int,c2 int) partition by range( abs(c1) ) (partition p0 values less than (2));`) tk.MustExec(`create TABLE t16 (c1 int) partition by range( c1) (partition p0 values less than (10));`) - tk.MustGetErrCode(`create TABLE t17 (c1 int,c2 float) partition by range(c1 + c2 ) (partition p0 values less than (2));`, tmysql.ErrPartitionFuncNotAllowed) - tk.MustGetErrCode(`create TABLE t18 (c1 int,c2 float) partition by range( floor(c2) ) (partition p0 values less than (2));`, tmysql.ErrPartitionFuncNotAllowed) + tk.MustGetErrCode(`create TABLE t17 (c1 int,c2 float) partition by range(c1 + c2 ) (partition p0 values less than (2));`, errno.ErrPartitionFuncNotAllowed) + tk.MustGetErrCode(`create TABLE t18 (c1 int,c2 float) partition by range( floor(c2) ) (partition p0 values less than (2));`, errno.ErrPartitionFuncNotAllowed) tk.MustExec(`create TABLE t19 (c1 int,c2 float) partition by range( floor(c1) ) (partition p0 values less than (2));`) tk.MustExec(`create TABLE t20 (c1 int,c2 bit(10)) partition by range(c2) (partition p0 values less than (10));`) tk.MustExec(`create TABLE t21 (c1 int,c2 year) partition by range( c2 ) (partition p0 values less than (2000));`) - tk.MustGetErrCode(`create TABLE t24 (c1 float) partition by range( c1 ) (partition p0 values less than (2000));`, tmysql.ErrFieldTypeNotAllowedAsPartitionField) + tk.MustGetErrCode(`create TABLE t24 (c1 
float) partition by range( c1 ) (partition p0 values less than (2000));`, errno.ErrFieldTypeNotAllowedAsPartitionField) // test check order. The sql below have 2 problem: 1. ErrFieldTypeNotAllowedAsPartitionField 2. ErrPartitionMaxvalue , mysql will return ErrPartitionMaxvalue. - tk.MustGetErrCode(`create TABLE t25 (c1 float) partition by range( c1 ) (partition p1 values less than maxvalue,partition p0 values less than (2000));`, tmysql.ErrPartitionMaxvalue) + tk.MustGetErrCode(`create TABLE t25 (c1 float) partition by range( c1 ) (partition p1 values less than maxvalue,partition p0 values less than (2000));`, errno.ErrPartitionMaxvalue) // Fix issue 7362. tk.MustExec("create table test_partition(id bigint, name varchar(255), primary key(id)) ENGINE=InnoDB DEFAULT CHARSET=utf8 PARTITION BY RANGE COLUMNS(id) (PARTITION p1 VALUES LESS THAN (10) ENGINE = InnoDB);") @@ -286,14 +285,14 @@ func TestCreateTableWithPartition(t *testing.T) { (partition p0 values less than (10, 10.0))`) tk.MustQuery("show warnings").Check(testkit.Rows("Warning 8200 Unsupported partition type RANGE, treat as normal table")) - tk.MustGetErrCode(`create table t31 (a int not null) partition by range( a );`, tmysql.ErrPartitionsMustBeDefined) - tk.MustGetErrCode(`create table t32 (a int not null) partition by range columns( a );`, tmysql.ErrPartitionsMustBeDefined) - tk.MustGetErrCode(`create table t33 (a int, b int) partition by hash(a) partitions 0;`, tmysql.ErrNoParts) - tk.MustGetErrCode(`create table t33 (a timestamp, b int) partition by hash(a) partitions 30;`, tmysql.ErrFieldTypeNotAllowedAsPartitionField) - tk.MustGetErrCode(`CREATE TABLE t34 (c0 INT) PARTITION BY HASH((CASE WHEN 0 THEN 0 ELSE c0 END )) PARTITIONS 1;`, tmysql.ErrPartitionFunctionIsNotAllowed) - tk.MustGetErrCode(`CREATE TABLE t0(c0 INT) PARTITION BY HASH((c0 0 && !ifExists { return dropExistErr.GenWithStackByArgs(strings.Join(notExistTables, ",")) @@ -6900,9 +6897,9 @@ func handleDatabasePlacement(ctx sessionctx.Context, dbInfo *model.DBInfo) error sessVars := ctx.GetSessionVars() if sessVars.PlacementMode == variable.PlacementModeIgnore { dbInfo.PlacementPolicyRef = nil - sessVars.StmtCtx.AppendNote(errors.New( - fmt.Sprintf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), - )) + sessVars.StmtCtx.AppendNote( + fmt.Errorf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), + ) return nil } @@ -6914,9 +6911,9 @@ func handleDatabasePlacement(ctx sessionctx.Context, dbInfo *model.DBInfo) error func handleTablePlacement(ctx sessionctx.Context, tbInfo *model.TableInfo) error { sessVars := ctx.GetSessionVars() if sessVars.PlacementMode == variable.PlacementModeIgnore && removeTablePlacement(tbInfo) { - sessVars.StmtCtx.AppendNote(errors.New( - fmt.Sprintf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), - )) + sessVars.StmtCtx.AppendNote( + fmt.Errorf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), + ) return nil } @@ -6941,9 +6938,9 @@ func handleTablePlacement(ctx sessionctx.Context, tbInfo *model.TableInfo) error func handlePartitionPlacement(ctx sessionctx.Context, partInfo *model.PartitionInfo) error { sessVars := ctx.GetSessionVars() if sessVars.PlacementMode == variable.PlacementModeIgnore && removePartitionPlacement(partInfo) { - sessVars.StmtCtx.AppendNote(errors.New( - fmt.Sprintf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), - )) + 
sessVars.StmtCtx.AppendNote( + fmt.Errorf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), + ) return nil } @@ -6961,9 +6958,9 @@ func handlePartitionPlacement(ctx sessionctx.Context, partInfo *model.PartitionI func checkIgnorePlacementDDL(ctx sessionctx.Context) bool { sessVars := ctx.GetSessionVars() if sessVars.PlacementMode == variable.PlacementModeIgnore { - sessVars.StmtCtx.AppendNote(errors.New( - fmt.Sprintf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), - )) + sessVars.StmtCtx.AppendNote( + fmt.Errorf("Placement is ignored when TIDB_PLACEMENT_MODE is '%s'", variable.PlacementModeIgnore), + ) return true } return false diff --git a/ddl/ddl_error_test.go b/ddl/ddl_error_test.go index b869ae42d1aac..f22b67a97e0e5 100644 --- a/ddl/ddl_error_test.go +++ b/ddl/ddl_error_test.go @@ -214,5 +214,4 @@ func TestRenameViewOverDifferentSchemaError(t *testing.T) { //same schema tk.MustExec("rename table test.view_1 to test.view_1000;") - } diff --git a/ddl/ddl_tiflash_api.go b/ddl/ddl_tiflash_api.go index 4c3f9a70a3cec..69677ce34ca27 100644 --- a/ddl/ddl_tiflash_api.go +++ b/ddl/ddl_tiflash_api.go @@ -130,8 +130,8 @@ func (b *PollTiFlashBackoffContext) Tick(ID int64) (bool, bool, int) { return false, false, 0 } grew := e.MaybeGrow(b) - e.Counter += 1 - e.TotalCounter += 1 + e.Counter++ + e.TotalCounter++ return grew, true, e.TotalCounter } @@ -264,8 +264,8 @@ func getTiflashHTTPAddr(host string, statusAddr string) (string, error) { return addr, nil } -// GetTiFlashReplicaInfo parses model.TableInfo into []TiFlashReplicaStatus. -func GetTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus) { +// LoadTiFlashReplicaInfo parses model.TableInfo into []TiFlashReplicaStatus. 
+func LoadTiFlashReplicaInfo(tblInfo *model.TableInfo, tableList *[]TiFlashReplicaStatus) { if tblInfo.TiFlashReplica == nil { // reject tables that has no tiflash replica such like `INFORMATION_SCHEMA` return @@ -349,7 +349,7 @@ func updateTiFlashStores(pollTiFlashContext *TiFlashManagementContext) error { func (d *ddl) pollTiFlashReplicaStatus(ctx sessionctx.Context, pollTiFlashContext *TiFlashManagementContext) (bool, error) { allReplicaReady := true defer func() { - pollTiFlashContext.HandlePdCounter += 1 + pollTiFlashContext.HandlePdCounter++ pollTiFlashContext.HandlePdCounter %= PullTiFlashPdTick.Load() }() @@ -384,7 +384,7 @@ func (d *ddl) pollTiFlashReplicaStatus(ctx sessionctx.Context, pollTiFlashContex tbls := schema.SchemaTables(db.Name) for _, tbl := range tbls { tblInfo := tbl.Meta() - GetTiFlashReplicaInfo(tblInfo, &tableList) + LoadTiFlashReplicaInfo(tblInfo, &tableList) } } @@ -484,7 +484,7 @@ func getDropOrTruncateTableTiflash(ctx sessionctx.Context, currentSchema infosch return false, nil } uniqueIDMap[tblInfo.ID] = struct{}{} - GetTiFlashReplicaInfo(tblInfo, replicaInfos) + LoadTiFlashReplicaInfo(tblInfo, replicaInfos) return false, nil } fn := func(jobs []*model.Job) (bool, error) { diff --git a/ddl/ddl_tiflash_test.go b/ddl/ddl_tiflash_test.go index 2db35d19cb744..9c4bc300f3bf0 100644 --- a/ddl/ddl_tiflash_test.go +++ b/ddl/ddl_tiflash_test.go @@ -908,7 +908,7 @@ func TestTiFlashBatchRateLimiter(t *testing.T) { tb, err := s.dom.InfoSchema().TableByName(model.NewCIStr("tiflash_ddl_limit"), model.NewCIStr(fmt.Sprintf("t%v", i))) require.NoError(t, err) if tb.Meta().TiFlashReplica != nil { - cnt += 1 + cnt++ } } require.Equal(t, expected, cnt) diff --git a/ddl/index.go b/ddl/index.go index 50b1d98f032b3..4ff08c90ab3bd 100644 --- a/ddl/index.go +++ b/ddl/index.go @@ -426,7 +426,7 @@ func getNullColInfos(tblInfo *model.TableInfo, indexInfo *model.IndexInfo) ([]*m return nullCols, nil } -func checkPrimaryKeyNotNull(d *ddlCtx, w *worker, sqlMode mysql.SQLMode, t *meta.Meta, job *model.Job, +func checkPrimaryKeyNotNull(d *ddlCtx, w *worker, t *meta.Meta, job *model.Job, tblInfo *model.TableInfo, indexInfo *model.IndexInfo) (warnings []string, err error) { if !indexInfo.Primary { return nil, nil @@ -607,7 +607,7 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo case model.StateDeleteOnly: // delete only -> write only indexInfo.State = model.StateWriteOnly - _, err = checkPrimaryKeyNotNull(d, w, sqlMode, t, job, tblInfo, indexInfo) + _, err = checkPrimaryKeyNotNull(d, w, t, job, tblInfo, indexInfo) if err != nil { break } @@ -619,7 +619,7 @@ func (w *worker) onCreateIndex(d *ddlCtx, t *meta.Meta, job *model.Job, isPK boo case model.StateWriteOnly: // write only -> reorganization indexInfo.State = model.StateWriteReorganization - _, err = checkPrimaryKeyNotNull(d, w, sqlMode, t, job, tblInfo, indexInfo) + _, err = checkPrimaryKeyNotNull(d, w, t, job, tblInfo, indexInfo) if err != nil { break } @@ -782,9 +782,9 @@ func onDropIndex(d *ddlCtx, t *meta.Meta, job *model.Job) (ver int64, _ error) { if job.IsRollingback() { job.FinishTableJob(model.JobStateRollbackDone, model.StateNone, ver, tblInfo) job.Args[0] = indexInfo.ID + } else { // the partition ids were append by convertAddIdxJob2RollbackJob, it is weird, but for the compatibility, // we should keep appending the partitions in the convertAddIdxJob2RollbackJob. 
- } else { job.FinishTableJob(model.JobStateDone, model.StateNone, ver, tblInfo) job.Args = append(job.Args, indexInfo.ID, getPartitionIDs(tblInfo)) } @@ -1008,7 +1008,7 @@ type addIndexWorker struct { distinctCheckFlags []bool } -func newAddIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *addIndexWorker { +func newAddIndexWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, indexInfo *model.IndexInfo, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *addIndexWorker { index := tables.NewIndex(t.GetPhysicalID(), t.Meta(), indexInfo) rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) return &addIndexWorker{ @@ -1456,7 +1456,7 @@ type cleanUpIndexWorker struct { baseIndexWorker } -func newCleanUpIndexWorker(sessCtx sessionctx.Context, worker *worker, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *cleanUpIndexWorker { +func newCleanUpIndexWorker(sessCtx sessionctx.Context, id int, t table.PhysicalTable, decodeColMap map[int64]decoder.Column, reorgInfo *reorgInfo, jc *JobContext) *cleanUpIndexWorker { indexes := make([]table.Index, 0, len(t.Indices())) rowDecoder := decoder.NewRowDecoder(t, t.WritableCols(), decodeColMap) for _, index := range t.Indices() { diff --git a/ddl/index_modify_test.go b/ddl/index_modify_test.go index ee2df27913f28..7b64658b88ffe 100644 --- a/ddl/index_modify_test.go +++ b/ddl/index_modify_test.go @@ -782,7 +782,7 @@ func checkGlobalIndexRow( tblColMap := make(map[int64]*types.FieldType, len(tblInfo.Columns)) for _, col := range tblInfo.Columns { - tblColMap[col.ID] = &col.FieldType + tblColMap[col.ID] = &(col.FieldType) } // Check local index entry does not exist. 
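Several hunks in this patch, such as CheckNewCollationEnable and ShiftTS in br/pkg/task and the br/pkg/utils backoff tests, apply the mechanical fix demanded by revive's indent-error-flow and superfluous-else rules: when an if branch ends in a return, the trailing else only adds indentation. A hedged before/after sketch with hypothetical names:

    package example

    // Before: flagged by indent-error-flow, because the else block is superfluous.
    func clampBefore(v int64) int64 {
        if v < 0 {
            return 0
        } else {
            return v * 2
        }
    }

    // After: early return, one less level of nesting, identical behavior.
    func clampAfter(v int64) int64 {
        if v < 0 {
            return 0
        }
        return v * 2
    }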
diff --git a/ddl/job_table.go b/ddl/job_table.go index 23cedfb8b1b56..a003e1b8843ea 100644 --- a/ddl/job_table.go +++ b/ddl/job_table.go @@ -185,12 +185,12 @@ func (d *ddl) startDispatchLoop() { case <-d.ctx.Done(): return } - d.getDDLJobAndRun(sess, d.generalDDLWorkerPool, d.getGeneralJob) - d.getDDLJobAndRun(sess, d.reorgWorkerPool, d.getReorgJob) + d.loadDDLJobAndRun(sess, d.generalDDLWorkerPool, d.getGeneralJob) + d.loadDDLJobAndRun(sess, d.reorgWorkerPool, d.getReorgJob) } } -func (d *ddl) getDDLJobAndRun(sess *session, pool *workerPool, getJob func(*session) (*model.Job, error)) { +func (d *ddl) loadDDLJobAndRun(sess *session, pool *workerPool, getJob func(*session) (*model.Job, error)) { wk, err := pool.get() if err != nil || wk == nil { logutil.BgLogger().Debug(fmt.Sprintf("[ddl] no %v worker available now", pool.tp()), zap.Error(err)) diff --git a/ddl/modify_column_test.go b/ddl/modify_column_test.go index 783f11d7c71d0..bbfbe4da8d2b9 100644 --- a/ddl/modify_column_test.go +++ b/ddl/modify_column_test.go @@ -420,7 +420,6 @@ func TestModifyColumnCharset(t *testing.T) { " `a` varchar(8) DEFAULT NULL,\n" + " `b` varchar(8) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL\n" + ") ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin")) - } func TestModifyColumnTime_TimeToYear(t *testing.T) { diff --git a/ddl/placement_policy_test.go b/ddl/placement_policy_test.go index 0a1663abd85cd..f6d74b5a6cfb3 100644 --- a/ddl/placement_policy_test.go +++ b/ddl/placement_policy_test.go @@ -201,6 +201,7 @@ func TestPlacementPolicy(t *testing.T) { tk.MustExec("drop placement policy x") tk.MustGetErrCode("drop placement policy x", mysql.ErrPlacementPolicyNotExists) tk.MustExec("drop placement policy if exists x") + //nolint:revive,all_revive tk.MustQuery("show warnings").Check(testkit.Rows("Note 8239 Unknown placement policy 'x'")) // TODO: privilege check & constraint syntax check. diff --git a/ddl/placement_sql_test.go b/ddl/placement_sql_test.go index 4d560831d4044..f8ee695d80f45 100644 --- a/ddl/placement_sql_test.go +++ b/ddl/placement_sql_test.go @@ -580,7 +580,6 @@ func TestPlacementMode(t *testing.T) { err = dom.DDL().CreateSchemaWithInfo(tk.Session(), db1, ddl.OnExistError) require.NoError(t, err) tk.MustQuery("show create database db2").Check(testkit.Rows("db2 CREATE DATABASE `db2` /*!40100 DEFAULT CHARACTER SET utf8mb4 */")) - } func TestPlacementTiflashCheck(t *testing.T) { diff --git a/ddl/schematracker/dm_tracker.go b/ddl/schematracker/dm_tracker.go index 3958779df7cd9..3438f5380d4a1 100644 --- a/ddl/schematracker/dm_tracker.go +++ b/ddl/schematracker/dm_tracker.go @@ -1004,7 +1004,7 @@ func (d SchemaTracker) AlterTable(ctx context.Context, sctx sessionctx.Context, } // TruncateTable implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error { +func (SchemaTracker) TruncateTable(ctx sessionctx.Context, tableIdent ast.Ident) error { return nil } @@ -1054,32 +1054,28 @@ func (d SchemaTracker) renameTable(ctx sessionctx.Context, oldIdents, newIdents } // LockTables implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error { +func (SchemaTracker) LockTables(ctx sessionctx.Context, stmt *ast.LockTablesStmt) error { return nil } // UnlockTables implements the DDL interface, it's no-op in DM's case. 
-func (d SchemaTracker) UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error { +func (SchemaTracker) UnlockTables(ctx sessionctx.Context, lockedTables []model.TableLockTpInfo) error { return nil - } // CleanupTableLock implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error { +func (SchemaTracker) CleanupTableLock(ctx sessionctx.Context, tables []*ast.TableName) error { return nil - } // UpdateTableReplicaInfo implements the DDL interface, it's no-op in DM's case. func (d SchemaTracker) UpdateTableReplicaInfo(ctx sessionctx.Context, physicalID int64, available bool) error { return nil - } // RepairTable implements the DDL interface, it's no-op in DM's case. func (d SchemaTracker) RepairTable(ctx sessionctx.Context, table *ast.TableName, createStmt *ast.CreateTableStmt) error { return nil - } // CreateSequence implements the DDL interface, it's no-op in DM's case. @@ -1090,30 +1086,26 @@ func (d SchemaTracker) CreateSequence(ctx sessionctx.Context, stmt *ast.CreateSe // DropSequence implements the DDL interface, it's no-op in DM's case. func (d SchemaTracker) DropSequence(ctx sessionctx.Context, stmt *ast.DropSequenceStmt) (err error) { return nil - } // AlterSequence implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) AlterSequence(ctx sessionctx.Context, stmt *ast.AlterSequenceStmt) error { +func (SchemaTracker) AlterSequence(_ sessionctx.Context, _ *ast.AlterSequenceStmt) error { return nil } // CreatePlacementPolicy implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) CreatePlacementPolicy(ctx sessionctx.Context, stmt *ast.CreatePlacementPolicyStmt) error { +func (SchemaTracker) CreatePlacementPolicy(_ sessionctx.Context, _ *ast.CreatePlacementPolicyStmt) error { return nil - } // DropPlacementPolicy implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) DropPlacementPolicy(ctx sessionctx.Context, stmt *ast.DropPlacementPolicyStmt) error { +func (SchemaTracker) DropPlacementPolicy(_ sessionctx.Context, _ *ast.DropPlacementPolicyStmt) error { return nil - } // AlterPlacementPolicy implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacementPolicyStmt) error { +func (SchemaTracker) AlterPlacementPolicy(ctx sessionctx.Context, stmt *ast.AlterPlacementPolicyStmt) error { return nil - } // BatchCreateTableWithInfo implements the DDL interface, it will call CreateTableWithInfo for each table. @@ -1134,7 +1126,6 @@ func (d SchemaTracker) CreatePlacementPolicyWithInfo(ctx sessionctx.Context, pol // Start implements the DDL interface, it's no-op in DM's case. func (d SchemaTracker) Start(ctxPool *pools.ResourcePool) error { return nil - } // GetLease implements the DDL interface, it's no-op in DM's case. @@ -1158,30 +1149,30 @@ func (d SchemaTracker) Stop() error { } // RegisterStatsHandle implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) RegisterStatsHandle(handle *handle.Handle) {} +func (SchemaTracker) RegisterStatsHandle(handle *handle.Handle) {} // SchemaSyncer implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) SchemaSyncer() util.SchemaSyncer { +func (SchemaTracker) SchemaSyncer() util.SchemaSyncer { return nil } // OwnerManager implements the DDL interface, it's no-op in DM's case. 
-func (d SchemaTracker) OwnerManager() owner.Manager { +func (SchemaTracker) OwnerManager() owner.Manager { return nil } // GetID implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) GetID() string { +func (SchemaTracker) GetID() string { return "schema-tracker" } // GetTableMaxHandle implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) GetTableMaxHandle(ctx *ddl.JobContext, startTS uint64, tbl table.PhysicalTable) (kv.Handle, bool, error) { +func (SchemaTracker) GetTableMaxHandle(ctx *ddl.JobContext, startTS uint64, tbl table.PhysicalTable) (kv.Handle, bool, error) { return nil, false, nil } // SetBinlogClient implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) SetBinlogClient(client *pumpcli.PumpsClient) {} +func (SchemaTracker) SetBinlogClient(client *pumpcli.PumpsClient) {} // GetHook implements the DDL interface, it's no-op in DM's case. func (d SchemaTracker) GetHook() ddl.Callback { @@ -1189,24 +1180,24 @@ func (d SchemaTracker) GetHook() ddl.Callback { } // SetHook implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) SetHook(h ddl.Callback) {} +func (SchemaTracker) SetHook(h ddl.Callback) {} // GetInfoSchemaWithInterceptor implements the DDL interface. -func (d SchemaTracker) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.InfoSchema { +func (SchemaTracker) GetInfoSchemaWithInterceptor(ctx sessionctx.Context) infoschema.InfoSchema { panic("not implemented") } // DoDDLJob implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { +func (SchemaTracker) DoDDLJob(ctx sessionctx.Context, job *model.Job) error { return nil } // MoveJobFromQueue2Table implements the DDL interface, it's no-op in DM's case. -func (d SchemaTracker) MoveJobFromQueue2Table(b bool) error { +func (SchemaTracker) MoveJobFromQueue2Table(b bool) error { panic("implement me") } // MoveJobFromTable2Queue implements the DDL interface, it's no-op in DM's case. 
-func (d SchemaTracker) MoveJobFromTable2Queue() error { +func (SchemaTracker) MoveJobFromTable2Queue() error { panic("implement me") } diff --git a/ddl/sequence_test.go b/ddl/sequence_test.go index 743544cc25704..56b0a0529043f 100644 --- a/ddl/sequence_test.go +++ b/ddl/sequence_test.go @@ -862,6 +862,7 @@ func TestUnflodSequence(t *testing.T) { tk.MustQuery("select nextval(seq), b from (select nextval(seq) as b, a from t1) t2").Check(testkit.Rows("227 228", "229 230", "231 232")) tk.MustExec("insert into t2 select nextval(seq), b from (select nextval(seq) as b, a from t1) t2") tk.MustQuery("select * from t2").Check(testkit.Rows("233 234", "235 236", "237 238")) + //nolint:all_revive,revive tk.MustExec("delete from t2") // For union operator like select1 union select2, select1 and select2 will be executed parallelly, diff --git a/ddl/serial_test.go b/ddl/serial_test.go index 7d5c2c55a2d89..048cc001fca39 100644 --- a/ddl/serial_test.go +++ b/ddl/serial_test.go @@ -1132,6 +1132,7 @@ func TestForbidUnsupportedCollations(t *testing.T) { tk.MustExec("create table t1(a varchar(20))") mustGetUnsupportedCollation("alter table t1 modify a varchar(20) collate utf8mb4_roman_ci", "utf8mb4_roman_ci") mustGetUnsupportedCollation("alter table t1 modify a varchar(20) charset utf8 collate utf8_roman_ci", "utf8_roman_ci") + //nolint:revive,all_revive mustGetUnsupportedCollation("alter table t1 modify a varchar(20) charset utf8 collate utf8_roman_ci", "utf8_roman_ci") // TODO(bb7133): fix the following cases by setting charset from collate firstly. diff --git a/ddl/tiflash_replica_test.go b/ddl/tiflash_replica_test.go index 5431f917b7855..ca87913293cc3 100644 --- a/ddl/tiflash_replica_test.go +++ b/ddl/tiflash_replica_test.go @@ -190,7 +190,6 @@ func TestSetTableFlashReplicaForSystemTable(t *testing.T) { } else { require.Equal(t, fmt.Sprintf("[planner:1142]ALTER command denied to user 'root'@'%%' for table '%s'", strings.ToLower(one)), err.Error()) } - } sysTables = sysTables[:0] } @@ -444,5 +443,4 @@ func TestTruncateTable2(t *testing.T) { require.Equal(t, t1.Meta().TiFlashReplica.LocationLabels, t2.Meta().TiFlashReplica.LocationLabels) require.False(t, t2.Meta().TiFlashReplica.Available) require.Equal(t, []int64{partition.Definitions[1].ID}, t2.Meta().TiFlashReplica.AvailablePartitionIDs) - } diff --git a/domain/schema_checker.go b/domain/schema_checker.go index 5251cc4541f5d..f3737d85bdb80 100644 --- a/domain/schema_checker.go +++ b/domain/schema_checker.go @@ -72,7 +72,6 @@ func (s *SchemaChecker) CheckBySchemaVer(txnTS uint64, startSchemaVer tikv.Schem case ResultUnknown: time.Sleep(schemaOutOfDateRetryInterval) } - } metrics.SchemaLeaseErrorCounter.WithLabelValues("outdated").Inc() return nil, ErrInfoSchemaExpired diff --git a/dumpling/export/config_test.go b/dumpling/export/config_test.go index b799d25487fe6..966a933881497 100644 --- a/dumpling/export/config_test.go +++ b/dumpling/export/config_test.go @@ -50,5 +50,4 @@ func TestGetConfTables(t *testing.T) { actualDBTables, err := GetConfTables(tablesList) require.NoError(t, err) require.Equal(t, expectedDBTables, actualDBTables) - } diff --git a/dumpling/export/prepare_test.go b/dumpling/export/prepare_test.go index f9f559448d078..ebfb2ee7efe48 100644 --- a/dumpling/export/prepare_test.go +++ b/dumpling/export/prepare_test.go @@ -321,7 +321,7 @@ func TestValidateResolveAutoConsistency(t *testing.T) { for _, testCase := range testCases { conf.Consistency = testCase.confConsistency conf.Snapshot = testCase.confSnapshot - if testCase.err == true { + if 
testCase.err { require.NoError(t, validateResolveAutoConsistency(d)) } else { require.EqualError(t, validateResolveAutoConsistency(d), fmt.Sprintf("can't specify --snapshot when --consistency isn't snapshot, resolved consistency: %s", conf.Consistency)) diff --git a/dumpling/export/sql_test.go b/dumpling/export/sql_test.go index d79f74993c10b..c8950c8a1bc00 100644 --- a/dumpling/export/sql_test.go +++ b/dumpling/export/sql_test.go @@ -425,7 +425,6 @@ func TestShowCreatePolicy(t *testing.T) { require.NoError(t, err) require.Equal(t, "CREATE PLACEMENT POLICY `policy_x` LEARNERS=1", createPolicySQL) require.NoError(t, mock.ExpectationsWereMet()) - } func TestListPolicyNames(t *testing.T) { diff --git a/executor/admin_test.go b/executor/admin_test.go index ffb31b5c46e32..e9a9705564d00 100644 --- a/executor/admin_test.go +++ b/executor/admin_test.go @@ -1033,7 +1033,6 @@ func (l *logEntry) checkField(t *testing.T, requireFields ...zapcore.Field) { } require.NotNilf(t, f, "matched log fields %s:%s not found in log", rf.Key, rf) } - } func (l *logEntry) checkFieldNotEmpty(t *testing.T, fieldName string) { diff --git a/executor/aggfuncs/func_lead_lag_test.go b/executor/aggfuncs/func_lead_lag_test.go index 279f1360eca74..31ca5da97912c 100644 --- a/executor/aggfuncs/func_lead_lag_test.go +++ b/executor/aggfuncs/func_lead_lag_test.go @@ -114,7 +114,6 @@ func TestLeadLag(t *testing.T) { for _, test := range tests { testWindowFunc(t, test) } - } func TestMemLeadLag(t *testing.T) { @@ -163,5 +162,4 @@ func TestMemLeadLag(t *testing.T) { for _, test := range tests { testWindowAggMemFunc(t, test) } - } diff --git a/executor/aggregate.go b/executor/aggregate.go index 9d42e2f022b81..549ea91d3d838 100644 --- a/executor/aggregate.go +++ b/executor/aggregate.go @@ -699,7 +699,7 @@ func (w *HashAggFinalWorker) consumeIntermData(sctx sessionctx.Context) (err err } } -func (w *HashAggFinalWorker) getFinalResult(sctx sessionctx.Context) { +func (w *HashAggFinalWorker) loadFinalResult(sctx sessionctx.Context) { waitStart := time.Now() result, finished := w.receiveFinalResultHolder() if w.stats != nil { @@ -763,7 +763,7 @@ func (w *HashAggFinalWorker) run(ctx sessionctx.Context, waitGroup *sync.WaitGro if err := w.consumeIntermData(ctx); err != nil { w.outputCh <- &AfFinalResult{err: err} } - w.getFinalResult(ctx) + w.loadFinalResult(ctx) } // Next implements the Executor Next interface. 
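The getFinalResult → loadFinalResult rename above, like GetTiFlashReplicaInfo → LoadTiFlashReplicaInfo and getDDLJobAndRun → loadDDLJobAndRun in the earlier ddl hunks and the GetTestCases → LoadTestCases renames in the test files that follow, satisfies revive's get-return rule: a function named like a getter is expected to return a value. A hedged sketch with hypothetical types and names:

    package example

    type entry struct{}

    type cache struct {
        entries []entry
    }

    // Flagged by get-return: named like a getter, but returns nothing.
    func (c *cache) getEntries(dst *[]entry) {
        *dst = append(*dst, c.entries...)
    }

    // Compliant: either rename the mutator...
    func (c *cache) loadEntries(dst *[]entry) {
        *dst = append(*dst, c.entries...)
    }

    // ...or make it an actual getter that returns a value.
    func (c *cache) allEntries() []entry {
        return append([]entry(nil), c.entries...)
    }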
diff --git a/executor/aggregate_test.go b/executor/aggregate_test.go index 224e03198d847..cb1285e8da4be 100644 --- a/executor/aggregate_test.go +++ b/executor/aggregate_test.go @@ -660,7 +660,6 @@ func TestSelectDistinct(t *testing.T) { r := tk.MustQuery("select distinct name from select_distinct_test;") r.Check(testkit.Rows("hello")) tk.MustExec("commit") - } func TestAggPushDown(t *testing.T) { @@ -1002,7 +1001,7 @@ func TestInjectProjBelowTopN(t *testing.T) { input []string output [][]string ) - aggMergeSuiteData.GetTestCases(t, &input, &output) + aggMergeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i] = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) @@ -1111,7 +1110,6 @@ func TestIssue10608(t *testing.T) { tk.MustExec("insert into t values(508931), (508932)") tk.MustQuery("select (select /*+ stream_agg() */ group_concat(concat(123,'-')) from t where t.a = s.b group by t.a) as t from s;").Check(testkit.Rows("123-", "123-")) tk.MustQuery("select (select /*+ hash_agg() */ group_concat(concat(123,'-')) from t where t.a = s.b group by t.a) as t from s;").Check(testkit.Rows("123-", "123-")) - } func TestIssue12759HashAggCalledByApply(t *testing.T) { @@ -1135,7 +1133,7 @@ func TestIssue12759HashAggCalledByApply(t *testing.T) { input []string output [][]string ) - aggMergeSuiteData.GetTestCases(t, &input, &output) + aggMergeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i] = testdata.ConvertRowsToStrings(tk.MustQuery(tt).Rows()) @@ -1158,7 +1156,6 @@ func TestPR15242ShallowCopy(t *testing.T) { tk.MustExec(`insert into t values ('{"id": 3,"score":233}');`) tk.Session().GetSessionVars().MaxChunkSize = 2 tk.MustQuery(`select max(JSON_EXTRACT(a, '$.score')) as max_score,JSON_EXTRACT(a,'$.id') as id from t group by id order by id;`).Check(testkit.Rows("233 1", "233 2", "233 3")) - } func TestIssue15690(t *testing.T) { diff --git a/executor/analyze_test.go b/executor/analyze_test.go index 1b056752c2af7..3560f1b6a72e1 100644 --- a/executor/analyze_test.go +++ b/executor/analyze_test.go @@ -224,6 +224,7 @@ func TestFastAnalyze(t *testing.T) { "IndexReader 2.00 root index:IndexRangeScan", "└─IndexRangeScan 2.00 cop[tikv] table:t3, partition:p1, index:k(v) range:[3,3], keep order:false", )) + //nolint:revive,all_revive tk.MustExec(`set @@tidb_partition_prune_mode='` + string(variable.Dynamic) + `'`) // global-stats depends on stats-ver2, but stats-ver2 is not compatible with fast-analyze, so forbid using global-stats with fast-analyze now. 
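The large rewrite in the next hunk wraps each iteration of TestSnapshotAnalyzeAndMaxTSAnalyze in an immediately invoked closure. This is the standard fix for the defer rule's "loop" check (configured above with arguments that include "loop"): a defer placed directly inside a for loop, such as the test's defer clean(), does not run until the enclosing function returns, so resources pile up across iterations; running the body as its own function gives each iteration its own defer scope. A hedged, minimal sketch of the same transformation with hypothetical names:

    package example

    import "os"

    // Flagged: every file stays open until walkFlagged returns.
    func walkFlagged(paths []string) error {
        for _, p := range paths {
            f, err := os.Open(p)
            if err != nil {
                return err
            }
            defer f.Close() // defer inside a loop
            // ... read from f ...
        }
        return nil
    }

    // Fixed: the closure bounds the defer to one iteration, as the hunk
    // below does with func(analyzeSnapshot bool) { ... }(analyzeSnapshot).
    func walkFixed(paths []string) error {
        for _, p := range paths {
            if err := func(p string) error {
                f, err := os.Open(p)
                if err != nil {
                    return err
                }
                defer f.Close()
                // ... read from f ...
                return nil
            }(p); err != nil {
                return err
            }
        }
        return nil
    }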
diff --git a/executor/analyzetest/analyze_test.go b/executor/analyzetest/analyze_test.go index 8b9f9b593394b..557f67fd3070f 100644 --- a/executor/analyzetest/analyze_test.go +++ b/executor/analyzetest/analyze_test.go @@ -585,64 +585,66 @@ func TestAnalyzeFullSamplingOnIndexWithVirtualColumnOrPrefixColumn(t *testing.T) func TestSnapshotAnalyzeAndMaxTSAnalyze(t *testing.T) { for _, analyzeSnapshot := range []bool{true, false} { - store, clean := testkit.CreateMockStore(t) - defer clean() - tk := testkit.NewTestKit(t, store) + func(analyzeSnapshot bool) { + store, clean := testkit.CreateMockStore(t) + defer clean() + tk := testkit.NewTestKit(t, store) - tk.MustExec("use test") - if analyzeSnapshot { - tk.MustExec("set @@session.tidb_enable_analyze_snapshot = on") - } else { - tk.MustExec("set @@session.tidb_enable_analyze_snapshot = off") - } - tk.MustExec("drop table if exists t") - tk.MustExec("create table t(a int, index index_a(a))") - is := tk.Session().(sessionctx.Context).GetInfoSchema().(infoschema.InfoSchema) - tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) - require.NoError(t, err) - tblInfo := tbl.Meta() - tid := tblInfo.ID - tk.MustExec("insert into t values(1),(1),(1)") - tk.MustExec("begin") - txn, err := tk.Session().Txn(false) - require.NoError(t, err) - startTS1 := txn.StartTS() - tk.MustExec("commit") - tk.MustExec("insert into t values(2),(2),(2)") - tk.MustExec("begin") - txn, err = tk.Session().Txn(false) - require.NoError(t, err) - startTS2 := txn.StartTS() - tk.MustExec("commit") - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS1))) - tk.MustExec("analyze table t") - rows := tk.MustQuery(fmt.Sprintf("select count, snapshot from mysql.stats_meta where table_id = %d", tid)).Rows() - require.Len(t, rows, 1) - if analyzeSnapshot { - // Analyze cannot see the second insert if it reads the snapshot. - require.Equal(t, "3", rows[0][0]) - } else { - // Analyze can see the second insert if it reads the latest data. + tk.MustExec("use test") + if analyzeSnapshot { + tk.MustExec("set @@session.tidb_enable_analyze_snapshot = on") + } else { + tk.MustExec("set @@session.tidb_enable_analyze_snapshot = off") + } + tk.MustExec("drop table if exists t") + tk.MustExec("create table t(a int, index index_a(a))") + is := tk.Session().(sessionctx.Context).GetInfoSchema().(infoschema.InfoSchema) + tbl, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) + require.NoError(t, err) + tblInfo := tbl.Meta() + tid := tblInfo.ID + tk.MustExec("insert into t values(1),(1),(1)") + tk.MustExec("begin") + txn, err := tk.Session().Txn(false) + require.NoError(t, err) + startTS1 := txn.StartTS() + tk.MustExec("commit") + tk.MustExec("insert into t values(2),(2),(2)") + tk.MustExec("begin") + txn, err = tk.Session().Txn(false) + require.NoError(t, err) + startTS2 := txn.StartTS() + tk.MustExec("commit") + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS1))) + tk.MustExec("analyze table t") + rows := tk.MustQuery(fmt.Sprintf("select count, snapshot from mysql.stats_meta where table_id = %d", tid)).Rows() + require.Len(t, rows, 1) + if analyzeSnapshot { + // Analyze cannot see the second insert if it reads the snapshot. + require.Equal(t, "3", rows[0][0]) + } else { + // Analyze can see the second insert if it reads the latest data. 
+ require.Equal(t, "6", rows[0][0]) + } + s1Str := rows[0][1].(string) + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS2))) + tk.MustExec("analyze table t") + rows = tk.MustQuery(fmt.Sprintf("select count, snapshot from mysql.stats_meta where table_id = %d", tid)).Rows() + require.Len(t, rows, 1) require.Equal(t, "6", rows[0][0]) - } - s1Str := rows[0][1].(string) - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS2))) - tk.MustExec("analyze table t") - rows = tk.MustQuery(fmt.Sprintf("select count, snapshot from mysql.stats_meta where table_id = %d", tid)).Rows() - require.Len(t, rows, 1) - require.Equal(t, "6", rows[0][0]) - s2Str := rows[0][1].(string) - require.True(t, s1Str != s2Str) - tk.MustExec("set @@session.tidb_analyze_version = 2") - require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS1))) - tk.MustExec("analyze table t") - rows = tk.MustQuery(fmt.Sprintf("select count, snapshot from mysql.stats_meta where table_id = %d", tid)).Rows() - require.Len(t, rows, 1) - require.Equal(t, "6", rows[0][0]) - s3Str := rows[0][1].(string) - // The third analyze doesn't write results into mysql.stats_xxx because its snapshot is smaller than the second analyze. - require.Equal(t, s2Str, s3Str) - require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot")) + s2Str := rows[0][1].(string) + require.True(t, s1Str != s2Str) + tk.MustExec("set @@session.tidb_analyze_version = 2") + require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS1))) + tk.MustExec("analyze table t") + rows = tk.MustQuery(fmt.Sprintf("select count, snapshot from mysql.stats_meta where table_id = %d", tid)).Rows() + require.Len(t, rows, 1) + require.Equal(t, "6", rows[0][0]) + s3Str := rows[0][1].(string) + // The third analyze doesn't write results into mysql.stats_xxx because its snapshot is smaller than the second analyze. 
+ require.Equal(t, s2Str, s3Str) + require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot")) + }(analyzeSnapshot) } } diff --git a/executor/benchmark_test.go b/executor/benchmark_test.go index 06282390677e4..443524b2fc06d 100644 --- a/executor/benchmark_test.go +++ b/executor/benchmark_test.go @@ -760,7 +760,6 @@ func baseBenchmarkWindowFunctionsWithFrame(b *testing.B, pipelined int) { } } } - } func BenchmarkWindowFunctionsWithFrame(b *testing.B) { @@ -2118,5 +2117,4 @@ func BenchmarkAggPartialResultMapperMemoryUsage(b *testing.B) { func BenchmarkPipelinedRowNumberWindowFunctionExecution(b *testing.B) { b.ReportAllocs() - } diff --git a/executor/builder.go b/executor/builder.go index 68568f294a3bb..fbc7251b376c5 100644 --- a/executor/builder.go +++ b/executor/builder.go @@ -429,7 +429,7 @@ func buildIndexLookUpChecker(b *executorBuilder, p *plannercore.PhysicalIndexLoo tps := make([]*types.FieldType, 0, fullColLen) for _, col := range is.Columns { - tps = append(tps, &col.FieldType) + tps = append(tps, &(col.FieldType)) } if !e.isCommonHandle() { @@ -1037,7 +1037,6 @@ func (b *executorBuilder) buildDDL(v *plannercore.DDL) Executor { b.Ti.PartitionTelemetry.UseTablePartitionList = true } } - } } } @@ -2815,7 +2814,6 @@ func markChildrenUsedCols(outputSchema *expression.Schema, childSchema ...*expre func constructDistExecForTiFlash(sctx sessionctx.Context, p plannercore.PhysicalPlan) ([]*tipb.Executor, error) { execPB, err := p.ToPB(sctx, kv.TiFlash) return []*tipb.Executor{execPB}, err - } func constructDAGReq(ctx sessionctx.Context, plans []plannercore.PhysicalPlan, storeType kv.StoreType) (dagReq *tipb.DAGRequest, err error) { diff --git a/executor/compact_table.go b/executor/compact_table.go index 10f1008488406..61a41e5c95b80 100644 --- a/executor/compact_table.go +++ b/executor/compact_table.go @@ -158,8 +158,7 @@ func (task *storeCompactTask) work() error { // Stop remaining partitions when error happens. break } - } - // For partition table, there must be no data in task.parentExec.tableInfo.ID. So no need to compact it. + } // For partition table, there must be no data in task.parentExec.tableInfo.ID. So no need to compact it. } else { task.allPhysicalTables = 1 task.compactedPhysicalTables = 0 diff --git a/executor/ddl_test.go b/executor/ddl_test.go index 26e8d5656dec0..b285067100a08 100644 --- a/executor/ddl_test.go +++ b/executor/ddl_test.go @@ -701,7 +701,6 @@ func TestAlterTableModifyColumn(t *testing.T) { tk.MustExec("drop table if exists err_modify_multiple_collate;") tk.MustExec("create table err_modify_multiple_collate (a char(1) collate utf8_bin collate utf8_general_ci) charset utf8mb4 collate utf8mb4_bin") tk.MustGetErrMsg("alter table err_modify_multiple_collate modify column a char(1) collate utf8_bin collate utf8mb4_bin;", dbterror.ErrCollationCharsetMismatch.GenWithStackByArgs("utf8mb4_bin", "utf8").Error()) - } func TestColumnCharsetAndCollate(t *testing.T) { diff --git a/executor/executor_test.go b/executor/executor_test.go index 00ce02851b3b0..187e9cdf16a78 100644 --- a/executor/executor_test.go +++ b/executor/executor_test.go @@ -883,7 +883,7 @@ func TestJSON(t *testing.T) { // check CAST AS JSON. 
tk.MustQuery(`select CAST('3' AS JSON), CAST('{}' AS JSON), CAST(null AS JSON)`).Check(testkit.Rows(`3 {} `)) - + //nolint:revive,all_revive tk.MustQuery("select a, count(1) from test_json group by a order by a").Check(testkit.Rows( " 1", "null 1", @@ -1477,7 +1477,7 @@ func TestSetOperation(t *testing.T) { Plan []string Res []string } - executorSuiteData.GetTestCases(t, &input, &output) + executorSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1508,7 +1508,7 @@ func TestSetOperationOnDiffColType(t *testing.T) { Plan []string Res []string } - executorSuiteData.GetTestCases(t, &input, &output) + executorSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1536,7 +1536,7 @@ func TestIndexScanWithYearCol(t *testing.T) { Plan []string Res []string } - executorSuiteData.GetTestCases(t, &input, &output) + executorSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1628,7 +1628,7 @@ func TestTimezonePushDown(t *testing.T) { ctx := context.Background() count := 0 ctx1 := context.WithValue(ctx, "CheckSelectRequestHook", func(req *kv.Request) { - count += 1 + count++ dagReq := new(tipb.DAGRequest) require.NoError(t, proto.Unmarshal(req.Data, dagReq)) require.Equal(t, systemTZ.String(), dagReq.GetTimeZoneName()) @@ -2014,6 +2014,7 @@ func TestCheckIndex(t *testing.T) { require.NoError(t, err) _, err = se.Execute(context.Background(), "admin check index t c") require.Error(t, err) + //nolint:revive,all_revive require.Contains(t, err.Error(), "table count 3 != index(c) count 2") // TODO: pass the case below: @@ -2707,7 +2708,6 @@ func TestSelectForUpdate(t *testing.T) { err = tk1.ExecToErr("commit") require.Error(t, err) - } func TestSelectForUpdateOf(t *testing.T) { @@ -2846,7 +2846,6 @@ func TestInsertValuesWithSubQuery(t *testing.T) { tk.MustGetErrMsg( "insert into t set a = 81, b = (select ( SELECT '1' AS `c0` WHERE '1' >= `subq_0`.`c0` ) as `c1` FROM ( SELECT '1' AS `c0` ) AS `subq_0` );", "Insert's SET operation or VALUES_LIST doesn't support complex subqueries now") - } func TestDIVZeroInPartitionExpr(t *testing.T) { diff --git a/executor/executor_txn_test.go b/executor/executor_txn_test.go index 6a893dc4c4518..9261e54e705e3 100644 --- a/executor/executor_txn_test.go +++ b/executor/executor_txn_test.go @@ -254,7 +254,6 @@ func TestInvalidReadCacheTable(t *testing.T) { for _, query := range queries { // enable historical read cache table tk.MustExec(query.sql) - } } diff --git a/executor/explainfor_test.go b/executor/explainfor_test.go index 7b547d7caff8d..7833573725c2e 100644 --- a/executor/explainfor_test.go +++ b/executor/explainfor_test.go @@ -1226,7 +1226,6 @@ func TestIgnorePlanCacheWithPrepare(t *testing.T) { tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("1")) tk.MustQuery("execute stmt_join;").Check(testkit.Rows()) tk.MustQuery("select @@last_plan_from_binding;").Check(testkit.Rows("1")) - } func TestSelectView4PlanCache(t *testing.T) { diff --git a/executor/grant_test.go b/executor/grant_test.go index 9ea2f943071ea..94ea918064d5f 100644 --- a/executor/grant_test.go +++ b/executor/grant_test.go @@ -331,7 +331,6 @@ func TestGrantPrivilegeAtomic(t *testing.T) { tk.MustExec(`drop role if exists r1, r2, r3, r4;`) tk.MustExec(`drop table test.testatomic;`) - } func TestIssue2654(t *testing.T) { diff --git a/executor/index_lookup_merge_join.go 
index 98f083a0c31b9..d1e32465cc4ff 100644
--- a/executor/index_lookup_merge_join.go
+++ b/executor/index_lookup_merge_join.go
@@ -257,7 +257,7 @@ func (e *IndexLookUpMergeJoin) Next(ctx context.Context, req *chunk.Chunk) error
 	}
 	req.Reset()
 	if e.task == nil {
-		e.getFinishedTask(ctx)
+		e.loadFinishedTask(ctx)
 	}
 	for e.task != nil {
 		select {
@@ -266,7 +266,7 @@ func (e *IndexLookUpMergeJoin) Next(ctx context.Context, req *chunk.Chunk) error
 			if e.task.doneErr != nil {
 				return e.task.doneErr
 			}
-			e.getFinishedTask(ctx)
+			e.loadFinishedTask(ctx)
 			continue
 		}
 		req.SwapColumns(result.chk)
@@ -280,14 +280,13 @@ func (e *IndexLookUpMergeJoin) Next(ctx context.Context, req *chunk.Chunk) error
 	return nil
 }
 
-func (e *IndexLookUpMergeJoin) getFinishedTask(ctx context.Context) {
+// TODO: reuse the finished task memory to build tasks.
+func (e *IndexLookUpMergeJoin) loadFinishedTask(ctx context.Context) {
 	select {
 	case e.task = <-e.resultCh:
 	case <-ctx.Done():
 		e.task = nil
 	}
-
-	// TODO: reuse the finished task memory to build tasks.
 }
 
 func (omw *outerMergeWorker) run(ctx context.Context, wg *sync.WaitGroup, cancelFunc context.CancelFunc) {
diff --git a/executor/infoschema_reader.go b/executor/infoschema_reader.go
index 50c7a33d52369..9f31695e44f4d 100644
--- a/executor/infoschema_reader.go
+++ b/executor/infoschema_reader.go
@@ -398,7 +398,6 @@ func (e *memtableRetriever) setDataFromSchemata(ctx sessionctx.Context, schemas
 	rows := make([][]types.Datum, 0, len(schemas))
 
 	for _, schema := range schemas {
-
 		charset := mysql.DefaultCharset
 		collation := mysql.DefaultCollationName
@@ -800,7 +799,7 @@ ForColumnsTag:
 				continue
 			}
 
-			ft := &col.FieldType
+			ft := &(col.FieldType)
 			if tbl.IsView() {
 				e.viewMu.RLock()
 				if e.viewSchemaMap[tbl.ID] != nil {
@@ -1319,7 +1318,6 @@ func (e *DDLJobsReaderExec) Next(ctx context.Context, req *chunk.Chunk) error {
 			req.AppendString(12, e.runningJobs[i].Query)
 		}
 	}
-
 	}
 	e.cursor += num
 	count += num
diff --git a/executor/insert.go b/executor/insert.go
index abb3aa5efc7b5..fcb6e68330e7a 100644
--- a/executor/insert.go
+++ b/executor/insert.go
@@ -356,13 +356,13 @@ func (e *InsertExec) initEvalBuffer4Dup() {
 	// Append the old row before the new row, to be consistent with "Schema4OnDuplicate" in the "Insert" PhysicalPlan.
 	for _, col := range e.Table.WritableCols() {
-		evalBufferTypes = append(evalBufferTypes, &col.FieldType)
+		evalBufferTypes = append(evalBufferTypes, &(col.FieldType))
 	}
 	if extraLen > 0 {
 		evalBufferTypes = append(evalBufferTypes, e.SelectExec.base().retFieldTypes[e.rowLen:]...)
 	}
 	for _, col := range e.Table.Cols() {
-		evalBufferTypes = append(evalBufferTypes, &col.FieldType)
+		evalBufferTypes = append(evalBufferTypes, &(col.FieldType))
 	}
 	if e.hasExtraHandle {
 		evalBufferTypes = append(evalBufferTypes, types.NewFieldType(mysql.TypeLonglong))
diff --git a/executor/insert_common.go b/executor/insert_common.go
index dee087b13ae91..9486ce2652938 100644
--- a/executor/insert_common.go
+++ b/executor/insert_common.go
@@ -187,7 +187,7 @@ func (e *InsertValues) initEvalBuffer() {
 	}
 	e.evalBufferTypes = make([]*types.FieldType, numCols)
 	for i, col := range e.Table.Cols() {
-		e.evalBufferTypes[i] = &col.FieldType
+		e.evalBufferTypes[i] = &(col.FieldType)
 	}
 	if e.hasExtraHandle {
 		e.evalBufferTypes[len(e.evalBufferTypes)-1] = types.NewFieldType(mysql.TypeLonglong)
diff --git a/executor/insert_test.go b/executor/insert_test.go
index 91fb15bc50ed1..3e4c75c663575 100644
--- a/executor/insert_test.go
+++ b/executor/insert_test.go
@@ -1007,7 +1007,6 @@ func TestInsertWithAutoidSchema(t *testing.T) {
 			tk.MustQuery(tt.query).Check(tt.result)
 		}
 	}
-
 }
 
 func TestPartitionInsertOnDuplicate(t *testing.T) {
diff --git a/executor/join_test.go b/executor/join_test.go
index 3b86c21636979..eb886bba504bd 100644
--- a/executor/join_test.go
+++ b/executor/join_test.go
@@ -680,7 +680,7 @@ func TestUsingAndNaturalJoinSchema(t *testing.T) {
 		SQL string
 		Res []string
 	}
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -708,7 +708,7 @@ func TestNaturalJoin(t *testing.T) {
 		Plan []string
 		Res  []string
 	}
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -2762,7 +2762,6 @@ func TestIssue30211(t *testing.T) {
 
 		err = tk.QueryToErr("select /*+ inl_hash_join(t1) */ * from t1 join t2 on t1.a = t2.a;")
 		require.EqualError(t, err, "failpoint panic: TestIssue30211 IndexJoinPanic")
-
 	}()
 	tk.MustExec("insert into t1 values(1),(2);")
 	tk.MustExec("insert into t2 values(1),(1),(2),(2);")
diff --git a/executor/joiner.go b/executor/joiner.go
index ecfab11f66822..01d124bbc23cd 100644
--- a/executor/joiner.go
+++ b/executor/joiner.go
@@ -251,10 +251,7 @@ func (j *baseJoiner) makeShallowJoinRow(isRightJoin bool, inner, outer chunk.Row
 // filter is used to filter the result constructed by tryToMatchInners, the result is
 // built by one outer row and multiple inner rows. The returned bool value
 // indicates whether the outer row matches any inner rows.
-func (j *baseJoiner) filter(
-	input, output *chunk.Chunk, outerColLen int,
-	lUsed, rUsed []int) (bool, error) {
-
+func (j *baseJoiner) filter(input, output *chunk.Chunk, outerColLen int, lUsed, rUsed []int) (bool, error) {
 	var err error
 	j.selected, err = expression.VectorizedFilter(j.ctx, j.conditions, chunk.NewIterator4Chunk(input), j.selected)
 	if err != nil {
@@ -284,7 +281,6 @@ func (j *baseJoiner) filter(
 		innerColOffset, outerColOffset = len(lUsed), 0
 		innerColLen, outerColLen = outerColLen, innerColLen
 	}
-
 	}
 	return chunk.CopySelectedJoinRowsWithSameOuterRows(input, innerColOffset, innerColLen, outerColOffset, outerColLen, j.selected, output)
 }
@@ -296,7 +292,6 @@ func (j *baseJoiner) filter(
 func (j *baseJoiner) filterAndCheckOuterRowStatus(
 	input, output *chunk.Chunk, innerColsLen int, outerRowStatus []outerRowStatusFlag,
 	lUsed, rUsed []int) ([]outerRowStatusFlag, error) {
-
 	var err error
 	j.selected, j.isNull, err = expression.VectorizedFilterConsiderNull(j.ctx, j.conditions, chunk.NewIterator4Chunk(input), j.selected, j.isNull)
 	if err != nil {
diff --git a/executor/mem_reader.go b/executor/mem_reader.go
index e1e9ea100ee5c..0fa74bb01271e 100644
--- a/executor/mem_reader.go
+++ b/executor/mem_reader.go
@@ -104,7 +104,7 @@ func (m *memIndexReader) getMemRows(ctx context.Context) ([][]types.Datum, error
 	case m.table.PKIsHandle:
 		for _, col := range m.table.Columns {
 			if mysql.HasPriKeyFlag(col.GetFlag()) {
-				tps = append(tps, &col.FieldType)
+				tps = append(tps, &(col.FieldType))
 				break
 			}
 		}
diff --git a/executor/memtable_reader.go b/executor/memtable_reader.go
index 4fd4fe2fe7f6b..dc215e71fe3bb 100644
--- a/executor/memtable_reader.go
+++ b/executor/memtable_reader.go
@@ -817,7 +817,6 @@ func (e *hotRegionsHistoryRetriver) startRetrieving(
 	pdServers []infoschema.ServerInfo,
 	req *HistoryHotRegionsRequest,
 ) ([]chan hotRegionsResult, error) {
-
 	var results []chan hotRegionsResult
 	for _, srv := range pdServers {
 		for typ := range e.extractor.HotRegionTypes {
diff --git a/executor/partition_table_test.go b/executor/partition_table_test.go
index 3b8b50174ebc5..3503bcb9db8f2 100644
--- a/executor/partition_table_test.go
+++ b/executor/partition_table_test.go
@@ -410,7 +410,6 @@ func TestOrderByandLimit(t *testing.T) {
 }
 
 func TestBatchGetandPointGetwithHashPartition(t *testing.T) {
-
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
@@ -3077,7 +3076,7 @@ PARTITION BY RANGE (a) (
 
 	var input []string
 	var output []testOutput
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	verifyPartitionResult(tk, input, output)
 }
@@ -3105,7 +3104,7 @@ PARTITION BY RANGE (a) (
 
 	var input []string
 	var output []testOutput
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	verifyPartitionResult(tk, input, output)
 }
@@ -3128,7 +3127,7 @@ PARTITION BY RANGE (a) (
 
 	var input []string
 	var output []testOutput
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	verifyPartitionResult(tk, input, output)
 }
@@ -3155,7 +3154,7 @@ PARTITION BY RANGE (a) (
 
 	var input []string
 	var output []testOutput
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	verifyPartitionResult(tk, input, output)
 }
@@ -3179,7 +3178,7 @@ PARTITION BY RANGE (a) (
 
 	var input []string
 	var output []testOutput
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	verifyPartitionResult(tk, input, output)
 }
@@ -3207,7 +3206,7 @@ PARTITION BY RANGE (a) (
 
 	var input []string
 	var output []testOutput
-	executorSuiteData.GetTestCases(t, &input, &output)
+	executorSuiteData.LoadTestCases(t, &input, &output)
 	verifyPartitionResult(tk, input, output)
 }
diff --git a/executor/plan_replayer.go b/executor/plan_replayer.go
index 8ef54d86ba10d..456ac90fa1fde 100644
--- a/executor/plan_replayer.go
+++ b/executor/plan_replayer.go
@@ -542,7 +542,7 @@ func loadVariables(ctx sessionctx.Context, z *zip.Reader) error {
 		if err != nil {
 			return errors.AddStack(err)
 		}
-		//nolint: errcheck
+		//nolint: errcheck,all_revive
 		defer v.Close()
 		_, err = toml.DecodeReader(v, &varMap)
 		if err != nil {
diff --git a/executor/point_get_test.go b/executor/point_get_test.go
index 2de1c9b75a9ab..3b68a291f6057 100644
--- a/executor/point_get_test.go
+++ b/executor/point_get_test.go
@@ -269,7 +269,6 @@ func TestIndexLookupBinary(t *testing.T) {
 	tk.MustIndexLookup(`select * from t where a = "a";`).Check(testkit.Rows())
 	tk.MustIndexLookup(`select * from t where a = "a ";`).Check(testkit.Rows(`a b `))
 	tk.MustIndexLookup(`select * from t where a = "a ";`).Check(testkit.Rows())
-
 }
 
 func TestOverflowOrTruncated(t *testing.T) {
@@ -503,7 +502,7 @@ func TestClusterIndexCBOPointGet(t *testing.T) {
 		Plan []string
 		Res  []string
 	}
-	pointGetSuiteData.GetTestCases(t, &input, &output)
+	pointGetSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		plan := tk.MustQuery("explain format = 'brief' " + tt)
 		res := tk.MustQuery(tt).Sort()
@@ -793,7 +792,6 @@ func TestPointGetLockExistKey(t *testing.T) {
 		{rc: true, key: "primary key"},
 		{rc: true, key: "unique key"},
 	} {
-
 		tableName := fmt.Sprintf("t_%d", i)
 		func(rc bool, key string, tableName string) {
 			testLock(rc, key, tableName)
diff --git a/executor/prepared_test.go b/executor/prepared_test.go
index 9fbdaf3b4c50f..8f3ad5882ff39 100644
--- a/executor/prepared_test.go
+++ b/executor/prepared_test.go
@@ -539,7 +539,7 @@ func TestPlanCacheWithDifferentVariableTypes(t *testing.T) {
 			Result []string
 		}
 	}
-	prepareMergeSuiteData.GetTestCases(t, &input, &output)
+	prepareMergeSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		tk.MustExec(tt.PrepareStmt)
 		testdata.OnRecord(func() {
@@ -1082,7 +1082,7 @@ func TestParameterPushDown(t *testing.T) {
 		Plan      []string
 		FromCache string
 	}
-	prepareMergeSuiteData.GetTestCases(t, &input, &output)
+	prepareMergeSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 
 		if strings.HasPrefix(tt.SQL, "execute") {
@@ -1290,7 +1290,6 @@ func TestTemporaryTable4PlanCache(t *testing.T) {
 	tk.MustQuery("execute stmt;").Check(testkit.Rows())
 	tk.MustQuery("execute stmt;").Check(testkit.Rows())
 	tk.MustQuery("select @@last_plan_from_cache;").Check(testkit.Rows("0"))
-
 }
 
 func TestPrepareStmtAfterIsolationReadChange(t *testing.T) {
diff --git a/executor/projection.go b/executor/projection.go
index f43d264ebc74a..994128caff421 100644
--- a/executor/projection.go
+++ b/executor/projection.go
@@ -179,7 +179,6 @@ func (e *ProjectionExec) Next(ctx context.Context, req *chunk.Chunk) error {
 		return e.unParallelExecute(ctx, req)
 	}
 	return e.parallelExecute(ctx, req)
-
 }
 
 func (e *ProjectionExec) isUnparallelExec() bool {
diff --git a/executor/set_test.go b/executor/set_test.go
index 5b0fd526fa25c..75b6f92f6ae78 100644
--- a/executor/set_test.go
+++ b/executor/set_test.go
@@ -1572,7 +1572,6 @@ func TestEnableNoopFunctionsVar(t *testing.T) {
 	tk.MustExec("set global read_only = on")
@@global.read_only;").Check(testkit.Rows("1")) require.Error(t, tk.ExecToErr("set global read_only = abc")) - } // https://github.com/pingcap/tidb/issues/29670 diff --git a/executor/show.go b/executor/show.go index 3439d83aca44f..bc9bd72e2907c 100644 --- a/executor/show.go +++ b/executor/show.go @@ -587,7 +587,6 @@ func (e *ShowExec) fetchShowTableStatus(ctx context.Context) error { continue } e.result.AppendRow(row) - } return nil } diff --git a/executor/showtest/show_test.go b/executor/showtest/show_test.go index e983bc3a2746e..83ee9b5cfa8a1 100644 --- a/executor/showtest/show_test.go +++ b/executor/showtest/show_test.go @@ -831,7 +831,6 @@ func TestShowStatsPrivilege(t *testing.T) { tk1.MustExec("show stats_meta") tk1.MustExec("SHOW STATS_BUCKETS") tk1.MustExec("SHOW STATS_HISTOGRAMS") - } func TestIssue18878(t *testing.T) { @@ -1122,7 +1121,6 @@ func TestUnprivilegedShow(t *testing.T) { createTime := model.TSConvert2Time(tblInfo.Meta().UpdateTS).Format("2006-01-02 15:04:05") tk.MustQuery("show table status from testshow").Check(testkit.Rows(fmt.Sprintf("t1 InnoDB 10 Compact 0 0 0 0 0 0 %s utf8mb4_bin ", createTime))) - } func TestCollation(t *testing.T) { @@ -1898,7 +1896,6 @@ func TestShowTableStatusLike(t *testing.T) { rows = tk.MustQuery("SHOW table status LIKE 'li%'").Rows() require.Equal(t, "Li_1", rows[0][0]) require.Equal(t, "li_2", rows[1][0]) - } func TestShowCollationsLike(t *testing.T) { diff --git a/executor/shuffle.go b/executor/shuffle.go index 9143afd032f10..a667d5f11ca20 100644 --- a/executor/shuffle.go +++ b/executor/shuffle.go @@ -102,7 +102,6 @@ func (e *ShuffleExec) Open(ctx context.Context) error { if err := s.Open(ctx); err != nil { return err } - } if err := e.baseExecutor.Open(ctx); err != nil { return err diff --git a/executor/simple.go b/executor/simple.go index a7794d4fb8498..b66b813cde8fd 100644 --- a/executor/simple.go +++ b/executor/simple.go @@ -934,7 +934,6 @@ func (e *SimpleExec) executeAlterUser(ctx context.Context, s *ast.AlterUserStmt) spec.User.Username = user.Username spec.User.Hostname = user.AuthHostname } else { - // The user executing the query (user) does not match the user specified (spec.User) // The MySQL manual states: // "In most cases, ALTER USER requires the global CREATE USER privilege, or the UPDATE privilege for the mysql system schema" @@ -1177,14 +1176,13 @@ func (e *SimpleExec) executeRenameUser(s *ast.RenameUserStmt) error { // rename relationship from mysql.global_grants // TODO: add global_grants into the parser + // TODO: need update columns_priv once we implement columns_priv functionality. + // When that is added, please refactor both executeRenameUser and executeDropUser to use an array of tables + // to loop over, so it is easier to maintain. if err = renameUserHostInSystemTable(sqlExecutor, "global_grants", "User", "Host", userToUser); err != nil { failedUser = oldUser.String() + " TO " + newUser.String() + " mysql.global_grants error" break } - - //TODO: need update columns_priv once we implement columns_priv functionality. - // When that is added, please refactor both executeRenameUser and executeDropUser to use an array of tables - // to loop over, so it is easier to maintain. } if failedUser == "" { @@ -1361,9 +1359,7 @@ func (e *SimpleExec) executeDropUser(ctx context.Context, s *ast.DropUserStmt) e break } } - } - - //TODO: need delete columns_priv once we implement columns_priv functionality. + } //TODO: need delete columns_priv once we implement columns_priv functionality. 
 	}
 
 	if len(failedUsers) == 0 {
diff --git a/executor/simpletest/simple_test.go b/executor/simpletest/simple_test.go
index 5cbeb2e8b5da4..8b9fa04be0e1d 100644
--- a/executor/simpletest/simple_test.go
+++ b/executor/simpletest/simple_test.go
@@ -895,7 +895,6 @@ func TestSetPwd(t *testing.T) {
 	tk.MustExec(setPwdSQL)
 	result = tk.MustQuery(`SELECT authentication_string FROM mysql.User WHERE User="testpwd" and Host="localhost"`)
 	result.Check(testkit.Rows(auth.EncodePassword("pwd")))
-
 }
 
 func TestFlushPrivileges(t *testing.T) {
@@ -922,7 +921,6 @@ func TestFlushPrivileges(t *testing.T) {
 	// After flush.
 	_, err = se.Execute(ctx, `SELECT authentication_string FROM mysql.User WHERE User="testflush" and Host="localhost"`)
 	require.NoError(t, err)
-
 }
 
 func TestFlushPrivilegesPanic(t *testing.T) {
diff --git a/executor/splittest/split_table_test.go b/executor/splittest/split_table_test.go
index 9f5ceec8a01ea..f874bb8fa711b 100644
--- a/executor/splittest/split_table_test.go
+++ b/executor/splittest/split_table_test.go
@@ -713,5 +713,4 @@ func TestShowTableRegion(t *testing.T) {
 		}
 		require.Equal(t, infosync.PlacementScheduleStatePending.String(), rows[i][12])
 	}
-
 }
diff --git a/executor/tiflashtest/tiflash_test.go b/executor/tiflashtest/tiflash_test.go
index 886c2012b0b00..90ca47627c317 100644
--- a/executor/tiflashtest/tiflash_test.go
+++ b/executor/tiflashtest/tiflash_test.go
@@ -205,7 +205,6 @@ func TestJoinRace(t *testing.T) {
 	tk.MustExec("set @@session.tidb_enforce_mpp=ON")
 	tk.MustExec("set @@tidb_opt_broadcast_cartesian_join=0")
 	tk.MustQuery("select count(*) from (select count(a) x from t group by b) t1 join (select count(a) x from t group by b) t2 on t1.x > t2.x").Check(testkit.Rows("6"))
-
 }
 
 func TestMppExecution(t *testing.T) {
@@ -793,7 +792,6 @@ func TestMppUnionAll(t *testing.T) {
 	tk.MustExec("set @@tidb_enforce_mpp=1")
 	tk.MustExec("insert into x4 values (2, 2), (2, 3)")
 	tk.MustQuery("(select * from x1 union all select * from x4) order by a, b").Check(testkit.Rows("1 1", "2 2", "2 2", "2 3", "3 3", "4 4"))
-
 }
 
 func TestUnionWithEmptyDualTable(t *testing.T) {
diff --git a/executor/write.go b/executor/write.go
index 397ec2669a46d..a501bd4a48651 100644
--- a/executor/write.go
+++ b/executor/write.go
@@ -227,7 +227,6 @@ func updateRecord(ctx context.Context, sctx sessionctx.Context, h kv.Handle, old
 			}
 			return false, err
 		}
-
 	}
 	if onDup {
 		sc.AddAffectedRows(2)
diff --git a/expression/aggregation/base_func.go b/expression/aggregation/base_func.go
index e7ae700b092f4..25ec74a287cab 100644
--- a/expression/aggregation/base_func.go
+++ b/expression/aggregation/base_func.go
@@ -255,7 +255,6 @@ func (a *baseFuncDesc) typeInfer4GroupConcat(ctx sessionctx.Context) {
 			a.Args[i] = expression.BuildCastFunction(ctx, a.Args[i], tp)
 		}
 	}
-
 }
 
 func (a *baseFuncDesc) typeInfer4MaxMin(ctx sessionctx.Context) {
diff --git a/expression/bench_test.go b/expression/bench_test.go
index a8b3da39c5fe8..db71564aedc54 100644
--- a/expression/bench_test.go
+++ b/expression/bench_test.go
@@ -191,7 +191,6 @@ func BenchmarkScalarFunctionClone(b *testing.B) {
 func getRandomTime(r *rand.Rand) types.CoreTime {
 	return types.FromDate(r.Intn(2200), r.Intn(10)+1, r.Intn(20)+1,
 		r.Intn(12), r.Intn(60), r.Intn(60), r.Intn(1000000))
-
 }
 
 // dataGenerator is used to generate data for test.
@@ -1549,7 +1548,7 @@ func testVectorizedBuiltinFunc(t *testing.T, vecExprCases vecExprBenchCases) {
 			tmp := strings.Split(baseFuncName, ".")
 			baseFuncName = tmp[len(tmp)-1]
 
-			if !testAll && (testFunc[baseFuncName] != true && testFunc[funcName] != true) {
+			if !testAll && (!testFunc[baseFuncName] && !testFunc[funcName]) {
 				continue
 			}
 			// do not forget to implement the vectorized method.
@@ -1768,7 +1767,7 @@ func benchmarkVectorizedBuiltinFunc(b *testing.B, vecExprCases vecExprBenchCases
 			tmp := strings.Split(baseFuncName, ".")
 			baseFuncName = tmp[len(tmp)-1]
 
-			if !testAll && testFunc[baseFuncName] != true && testFunc[funcName] != true {
+			if !testAll && !testFunc[baseFuncName] && !testFunc[funcName] {
 				continue
 			}
 
diff --git a/expression/builtin_arithmetic.go b/expression/builtin_arithmetic.go
index 067b5c2b50d76..e834cad55bff9 100644
--- a/expression/builtin_arithmetic.go
+++ b/expression/builtin_arithmetic.go
@@ -340,7 +340,6 @@ func (c *arithmeticMinusFunctionClass) getFunction(ctx sessionctx.Context, args
 		sig.setPbCode(tipb.ScalarFuncSig_MinusDecimal)
 		return sig, nil
 	} else {
-
 		bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETInt, types.ETInt, types.ETInt)
 		if err != nil {
 			return nil, err
diff --git a/expression/builtin_arithmetic_vec.go b/expression/builtin_arithmetic_vec.go
index 9d35ac25c3945..4197d2e977737 100644
--- a/expression/builtin_arithmetic_vec.go
+++ b/expression/builtin_arithmetic_vec.go
@@ -25,6 +25,7 @@ import (
 	"github.com/pingcap/tidb/util/mathutil"
 )
 
+//revive:disable:defer
 func (b *builtinArithmeticMultiplyRealSig) vectorized() bool {
 	return true
 }
@@ -673,7 +674,6 @@ func (b *builtinArithmeticMultiplyIntSig) vecEvalInt(input *chunk.Chunk, result
 	result.MergeNulls(buf)
 	var tmp int64
 	for i := 0; i < n; i++ {
-
 		tmp = x[i] * y[i]
 		if (x[i] != 0 && tmp/x[i] != y[i]) || (tmp == math.MinInt64 && x[i] == -1) {
 			if result.IsNull(i) {
@@ -788,7 +788,6 @@ func (b *builtinArithmeticIntDivideIntSig) divideUU(result *chunk.Column, lhsI64
 		} else {
 			resultI64s[i] = int64(uint64(lhs) / uint64(rhs))
 		}
-
 	}
 	return nil
 }
@@ -1039,7 +1038,6 @@ func (b *builtinArithmeticMultiplyIntUnsignedSig) vecEvalInt(input *chunk.Chunk,
 	result.MergeNulls(buf)
 	var res uint64
 	for i := 0; i < n; i++ {
-
 		res = x[i] * y[i]
 		if x[i] != 0 && res/x[i] != y[i] {
 			if result.IsNull(i) {
diff --git a/expression/builtin_cast.go b/expression/builtin_cast.go
index ce34f71f6d3a4..904824e584d92 100644
--- a/expression/builtin_cast.go
+++ b/expression/builtin_cast.go
@@ -519,11 +519,13 @@ func (b *builtinCastIntAsDecimalSig) evalDecimal(row chunk.Row) (res *types.MyDe
 		return res, isNull, err
 	}
 	if unsignedArgs0 := mysql.HasUnsignedFlag(b.args[0].GetType().GetFlag()); !mysql.HasUnsignedFlag(b.tp.GetFlag()) && !unsignedArgs0 {
+		//revive:disable:empty-lines
 		res = types.NewDecFromInt(val)
 		// Round up to 0 if the value is negative but the expression eval type is unsigned in `UNION` statement
 		// NOTE: the following expressions are equal (so choose the more efficient one):
 		// `b.inUnion && mysql.HasUnsignedFlag(b.tp.GetFlag()) && !unsignedArgs0 && val < 0`
 		// `b.inUnion && !unsignedArgs0 && val < 0`
+		//revive:enable:empty-lines
 	} else if b.inUnion && !unsignedArgs0 && val < 0 {
 		res = &types.MyDecimal{}
 	} else {
@@ -1786,8 +1788,7 @@ const inUnionCastContext inCastContext = 0
 func CanImplicitEvalInt(expr Expression) bool {
 	switch f := expr.(type) {
 	case *ScalarFunction:
-		switch f.FuncName.L {
-		case ast.DayName:
+		if f.FuncName.L == ast.DayName {
 			return true
 		}
 	}
diff --git a/expression/builtin_control.go b/expression/builtin_control.go
index cb6717de7dd15..e03581084418c 100644
--- a/expression/builtin_control.go
+++ b/expression/builtin_control.go
@@ -147,7 +147,6 @@ func InferType4ControlFuncs(ctx sessionctx.Context, funcName string, lexp, rexp
 			}
 			flen := maxlen(lhsFlen, rhsFlen) + resultFieldType.GetDecimal() + 1 // account for -1 len fields
 			resultFieldType.SetFlenUnderLimit(flen)
-
 		} else {
 			resultFieldType.SetFlen(maxlen(lhs.GetFlen(), rhs.GetFlen()))
 		}
diff --git a/expression/builtin_encryption_test.go b/expression/builtin_encryption_test.go
index bda56e7bc4bdb..4dcc7b914bfe1 100644
--- a/expression/builtin_encryption_test.go
+++ b/expression/builtin_encryption_test.go
@@ -494,7 +494,6 @@ func TestMD5Hash(t *testing.T) {
 	}
 	_, err := funcs[ast.MD5].getFunction(ctx, []Expression{NewZero()})
 	require.NoError(t, err)
-
 }
 
 func TestRandomBytes(t *testing.T) {
diff --git a/expression/builtin_encryption_vec.go b/expression/builtin_encryption_vec.go
index 1c2124b8c3001..cd303cd7c79c8 100644
--- a/expression/builtin_encryption_vec.go
+++ b/expression/builtin_encryption_vec.go
@@ -35,6 +35,7 @@ import (
 	"github.com/pingcap/tidb/util/encrypt"
 )
 
+//revive:disable:defer
 func (b *builtinAesDecryptSig) vectorized() bool {
 	return true
 }
@@ -585,6 +586,7 @@ func (b *builtinCompressSig) vecEvalString(input *chunk.Chunk, result *chunk.Col
 		}
 
 		buffer := allocByteSlice(resultLength)
+		//nolint: revive
 		defer deallocateByteSlice(buffer)
 		buffer = buffer[:resultLength]
diff --git a/expression/builtin_json_vec.go b/expression/builtin_json_vec.go
index ea75b6b96187e..d20c0dfe4705f 100644
--- a/expression/builtin_json_vec.go
+++ b/expression/builtin_json_vec.go
@@ -28,6 +28,7 @@ import (
 	"github.com/pingcap/tipb/go-tipb"
 )
 
+//revive:disable:defer
 func vecJSONModify(ctx sessionctx.Context, args []Expression, bufAllocator columnBufferAllocator, input *chunk.Chunk, result *chunk.Column, mt json.ModifyType) error {
 	nr := input.NumRows()
 	jsonBuf, err := bufAllocator.get()
diff --git a/expression/builtin_string.go b/expression/builtin_string.go
index 55ab3d15c7486..e3474206b9bb8 100644
--- a/expression/builtin_string.go
+++ b/expression/builtin_string.go
@@ -1903,7 +1903,6 @@ func (b *builtinTrim3ArgsSig) evalString(row chunk.Row) (d string, isNull bool,
 	default:
 		d = trimLeft(str, remstr)
 		d = trimRight(d, remstr)
-
 	}
 	return d, false, nil
 }
diff --git a/expression/builtin_string_test.go b/expression/builtin_string_test.go
index a80d0dcda2a44..38d2724093c2a 100644
--- a/expression/builtin_string_test.go
+++ b/expression/builtin_string_test.go
@@ -2517,7 +2517,6 @@ func TestToBase64Sig(t *testing.T) {
 			lastWarn := warnings[len(warnings)-1]
 			require.True(t, terror.ErrorEqual(errWarnAllowedPacketOverflowed, lastWarn.Err))
 			ctx.GetSessionVars().StmtCtx.SetWarnings([]stmtctx.SQLWarn{})
-
 		} else {
 			require.False(t, isNull)
 		}
diff --git a/expression/builtin_string_vec.go b/expression/builtin_string_vec.go
index 844b9e598bf61..50671ccfa9232 100644
--- a/expression/builtin_string_vec.go
+++ b/expression/builtin_string_vec.go
@@ -33,6 +33,7 @@ import (
 	"github.com/pingcap/tidb/util/collate"
 )
 
+//revive:disable:defer
 func (b *builtinLowerSig) vecEvalString(input *chunk.Chunk, result *chunk.Column) error {
 	// if error is not nil return error, or builtinLowerSig is for binary strings (do nothing)
 	return b.args[0].VecEvalString(b.ctx, input, result)
diff --git a/expression/builtin_time.go b/expression/builtin_time.go
index c54e0aecd79a6..6ed67939220e0 100644
--- a/expression/builtin_time.go
+++ b/expression/builtin_time.go
@@ -3169,7 +3169,6 @@ func (du *baseDateArithmetical) vecGetDateFromString(b *baseBuiltinFunc, input *
 		} else {
 			dates[i] = date
 		}
-
 	}
 	return nil
 }
@@ -6085,7 +6084,6 @@ func (c *timestampAddFunctionClass) getFunction(ctx sessionctx.Context, args []E
 	sig := &builtinTimestampAddSig{bf}
 	sig.setPbCode(tipb.ScalarFuncSig_TimestampAdd)
 	return sig, nil
-
 }
 
 type builtinTimestampAddSig struct {
diff --git a/expression/builtin_vectorized_test.go b/expression/builtin_vectorized_test.go
index 6ba1803337e93..e512dcc4d2f3c 100644
--- a/expression/builtin_vectorized_test.go
+++ b/expression/builtin_vectorized_test.go
@@ -928,7 +928,6 @@ func BenchmarkFloat32ColRow(b *testing.B) {
 			if _, _, err := col.EvalReal(ctx, row); err != nil {
 				b.Fatal(err)
 			}
-
 		}
 	}
 }
diff --git a/expression/constant_propagation.go b/expression/constant_propagation.go
index c8d625c7b8be1..55f56702b8209 100644
--- a/expression/constant_propagation.go
+++ b/expression/constant_propagation.go
@@ -499,7 +499,6 @@ func (s *propOuterJoinConstSolver) validColEqualCond(cond Expression) (*Column,
 		}
 	}
 	return nil, nil
-
 }
 
 // deriveConds given `outerCol = innerCol`, derive new expression for specified conditions.
diff --git a/expression/constant_propagation_test.go b/expression/constant_propagation_test.go
index 6ff6d31a6faad..25fc6233da202 100644
--- a/expression/constant_propagation_test.go
+++ b/expression/constant_propagation_test.go
@@ -39,7 +39,7 @@ func TestOuterJoinPropConst(t *testing.T) {
 	}
 	expressionSuiteData := expression.GetExpressionSuiteData()
-	expressionSuiteData.GetTestCases(t, &input, &output)
+	expressionSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
diff --git a/expression/expr_to_pb_test.go b/expression/expr_to_pb_test.go
index b8aa3bf2f3aba..16bdfa38723bd 100644
--- a/expression/expr_to_pb_test.go
+++ b/expression/expr_to_pb_test.go
@@ -1650,7 +1650,6 @@ func TestPushDownSwitcher(t *testing.T) {
 }
 
 func TestPanicIfPbCodeUnspecified(t *testing.T) {
-
 	args := []Expression{genColumn(mysql.TypeLong, 1), genColumn(mysql.TypeLong, 2)}
 	fc, err := NewFunction(
 		mock.NewContext(),
diff --git a/expression/expression.go b/expression/expression.go
index d23897dddd7c3..52a94c6056ec4 100644
--- a/expression/expression.go
+++ b/expression/expression.go
@@ -263,7 +263,6 @@ func EvalBool(ctx sessionctx.Context, exprList CNFExprs, row chunk.Row) (bool, b
 			i, err = HandleOverflowOnSelection(ctx.GetSessionVars().StmtCtx, i, err)
 			if err != nil {
 				return false, false, err
-
 			}
 		}
 		if i == 0 {
@@ -766,6 +765,7 @@ type VarAssignment struct {
 
 // splitNormalFormItems split CNF(conjunctive normal form) like "a and b and c", or DNF(disjunctive normal form) like "a or b or c"
 func splitNormalFormItems(onExpr Expression, funcName string) []Expression {
+	//nolint: revive
 	switch v := onExpr.(type) {
 	case *ScalarFunction:
 		if v.FuncName.L == funcName {
diff --git a/expression/flag_simplify_test.go b/expression/flag_simplify_test.go
index c02cb7a1fa037..686e465db7b54 100644
--- a/expression/flag_simplify_test.go
+++ b/expression/flag_simplify_test.go
@@ -37,7 +37,7 @@ func TestSimplifyExpressionByFlag(t *testing.T) {
 		Plan []string
 	}
 	flagSimplifyData := expression.GetFlagSimplifyData()
-	flagSimplifyData.GetTestCases(t, &input, &output)
+	flagSimplifyData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
diff --git a/expression/integration_test.go b/expression/integration_test.go
index 8b2aa0139347f..2061aab4be26b 100644
--- a/expression/integration_test.go
+++ b/expression/integration_test.go
@@ -467,7 +467,6 @@ func TestMiscellaneousBuiltin(t *testing.T) {
 	result = tk.MustQuery(`SELECT RELEASE_LOCK('test_lock3');`) // not acquired
 	result.Check(testkit.Rows("0"))
 	tk.MustQuery(`SELECT RELEASE_ALL_LOCKS()`).Check(testkit.Rows("0")) // none acquired
-
 }
 
 func TestConvertToBit(t *testing.T) {
@@ -4143,7 +4142,6 @@ func TestNotExistFunc(t *testing.T) {
 	tk.MustExec("use test")
 	_, err = tk.Exec("SELECT timestampliteral(rand())")
 	require.Error(t, err, "[expression:1305]FUNCTION test.timestampliteral does not exist")
-
 }
 
 func TestDecodetoChunkReuse(t *testing.T) {
@@ -5010,7 +5008,6 @@ func TestIssue18525(t *testing.T) {
 	tk.MustExec("insert into t1 values ('l', NULL, '1000-01-04')")
 	tk.MustExec("insert into t1 values ('b', NULL, '1000-01-02')")
 	tk.MustQuery("select INTERVAL( ( CONVERT( -11752 USING utf8 ) ), 6558853612195285496, `col1`) from t1").Check(testkit.Rows("0", "0", "0"))
-
 }
 
 func TestSchemaDMLNotChange(t *testing.T) {
@@ -5907,7 +5904,6 @@ func TestJiraSetInnoDBDefaultRowFormat(t *testing.T) {
 	tk.MustQuery("SHOW VARIABLES LIKE 'character_set_server'").Check(testkit.Rows("character_set_server utf8mb4"))
 	tk.MustQuery("SHOW VARIABLES LIKE 'innodb_file_format'").Check(testkit.Rows("innodb_file_format Barracuda"))
 	tk.MustQuery("SHOW VARIABLES LIKE 'innodb_large_prefix'").Check(testkit.Rows("innodb_large_prefix ON"))
-
 }
 
 func TestIssue23623(t *testing.T) {
@@ -6389,7 +6385,6 @@ OR Variable_name = 'time_zone'
 OR Variable_name = 'system_time_zone' OR Variable_name = 'lower_case_table_names' OR Variable_name = 'max_allowed_packet'
 OR Variable_name = 'net_buffer_length' OR Variable_name = 'sql_mode' OR Variable_name = 'query_cache_type'
 OR Variable_name = 'query_cache_size' OR Variable_name = 'license' OR Variable_name = 'init_connect'`).Rows(), 19)
-
 }
 
 func TestBuiltinFuncJSONMergePatch_InColumn(t *testing.T) {
diff --git a/expression/partition_pruner.go b/expression/partition_pruner.go
index 472ff7ea8accc..f673a6df67cdb 100644
--- a/expression/partition_pruner.go
+++ b/expression/partition_pruner.go
@@ -39,7 +39,7 @@ func (p *hashPartitionPruner) getColID(col *Column) int {
 func (p *hashPartitionPruner) insertCol(col *Column) {
 	_, ok := p.colMapper[col.UniqueID]
 	if !ok {
-		p.numColumn += 1
+		p.numColumn++
 		p.colMapper[col.UniqueID] = len(p.colMapper)
 	}
 }
diff --git a/expression/util.go b/expression/util.go
index f3bddcc0e8e52..bd6ba4a17ad08 100644
--- a/expression/util.go
+++ b/expression/util.go
@@ -560,7 +560,7 @@ func locateStringWithCollation(str, substr, coll string) int64 {
 	count := int64(0)
 	for {
 		r, size := utf8.DecodeRuneInString(str)
-		count += 1
+		count++
 		index -= len(collator.KeyWithoutTrimRightSpace(string(r)))
 		if index <= 0 {
 			return count + 1
diff --git a/go.mod b/go.mod
index a1f88378152f1..8c29ac0730e43 100644
--- a/go.mod
+++ b/go.mod
@@ -54,13 +54,13 @@ require (
 	github.com/prometheus/client_golang v1.12.2
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.32.1
-	github.com/shirou/gopsutil/v3 v3.22.4
+	github.com/shirou/gopsutil/v3 v3.22.6
 	github.com/shurcooL/httpgzip v0.0.0-20190720172056-320755c1c1b0
 	github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 // indirect
 	github.com/soheilhy/cmux v0.1.5
-	github.com/spf13/cobra v1.4.0
+	github.com/spf13/cobra v1.5.0
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df
+	github.com/stretchr/testify v1.8.0
 	github.com/tiancaiamao/appdash v0.0.0-20181126055449-889f96f722a2
 	github.com/tikv/client-go/v2 v2.0.1-0.20220729034404-e10841f2d158
 	github.com/tikv/pd/client v0.0.0-20220725055910-7187a7ab72db
@@ -79,15 +79,15 @@ require (
 	go.uber.org/goleak v1.1.12
 	go.uber.org/multierr v1.8.0
 	go.uber.org/zap v1.21.0
-	golang.org/x/exp v0.0.0-20220428152302-39d4317da171
-	golang.org/x/net v0.0.0-20220412020605-290c469a71a5
+	golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e
+	golang.org/x/net v0.0.0-20220722155237-a158d28d115b
 	golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
-	golang.org/x/sync v0.0.0-20220513210516-0976fa681c29
-	golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664
+	golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
+	golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
 	golang.org/x/text v0.3.7
 	golang.org/x/time v0.0.0-20220224211638-0e9765cccd65
-	golang.org/x/tools v0.1.11
+	golang.org/x/tools v0.1.12
 	google.golang.org/api v0.74.0
 	google.golang.org/grpc v1.45.0
 	gopkg.in/yaml.v2 v2.4.0
@@ -98,9 +98,9 @@ require (
 	github.com/aliyun/alibaba-cloud-sdk-go v1.61.1581
 	github.com/charithe/durationcheck v0.0.9
-	github.com/daixiang0/gci v0.3.4
+	github.com/daixiang0/gci v0.4.3
 	github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a
-	github.com/golangci/golangci-lint v1.46.2
+	github.com/golangci/golangci-lint v1.47.2
 	github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb
 	github.com/golangci/misspell v0.3.5
 	github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4
@@ -111,7 +111,7 @@ require (
 	github.com/nishanths/predeclared v0.2.2
 	github.com/prometheus/prometheus v0.0.0-20190525122359-d20e84d0fb64
 	github.com/tdakkota/asciicheck v0.1.1
-	honnef.co/go/tools v0.3.1
+	honnef.co/go/tools v0.3.2
 )
 
 require (
@@ -164,7 +164,7 @@ require (
 	github.com/golang/glog v1.0.0 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21
-	github.com/google/go-cmp v0.5.7 // indirect
+	github.com/google/go-cmp v0.5.8 // indirect
 	github.com/gorilla/handlers v1.5.1 // indirect
 	github.com/gorilla/websocket v1.4.2 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
@@ -226,13 +226,13 @@ require (
 	go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
 	go.opentelemetry.io/otel/trace v0.20.0 // indirect
 	go.opentelemetry.io/proto/otlp v0.7.0 // indirect
-	golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e // indirect
+	golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d // indirect
 	golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect
 	golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/protobuf v1.28.0 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
-	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	gopkg.in/yaml.v3 v3.0.1 // indirect
 	sigs.k8s.io/yaml v1.2.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 7698c659bf9f3..631c0e76db772 100644
--- a/go.sum
+++ b/go.sum
@@ -211,13 +211,13 @@ github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSV
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
-github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
+github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548 h1:iwZdTE0PVqJCos1vaoKsclOGD3ADKpshg3SRtYBbwso=
 github.com/cznic/mathutil v0.0.0-20181122101859-297441e03548/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
-github.com/daixiang0/gci v0.3.4 h1:+EZ83znNs73C9ZBTM7xhNagMP6gJs5wlptiFiuce5BM=
-github.com/daixiang0/gci v0.3.4/go.mod h1:pB1j339Q+2sv/EyKd4dgvGXcaBGIErim+dlhLDtqeW4=
+github.com/daixiang0/gci v0.4.3 h1:wf7x0xRjQqTlA2dzHTI0A/xPyp7VcBatBG9nwGatwbQ=
+github.com/daixiang0/gci v0.4.3/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
 github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37 h1:X6mKGhCFOxrKeeHAjv/3UvT6e5RRxW6wRdlqlV6/H4w=
 github.com/danjacques/gofslock v0.0.0-20191023191349-0a45f885bc37/go.mod h1:DC3JtzuG7kxMvJ6dZmf2ymjNyoXwgtklr7FN+Um2B0U=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -382,8 +382,8 @@ github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a h1:iR3fYXUjHCR97qWS8ch1y9zPNsgXThGwjKPrYfqMPks=
 github.com/golangci/gofmt v0.0.0-20190930125516-244bba706f1a/go.mod h1:9qCChq59u/eW8im404Q2WWTrnBUQKjpNYKMbU4M7EFU=
-github.com/golangci/golangci-lint v1.46.2 h1:o90t/Xa6dhJbvy8Bz2RpzUXqrkigp19DLStMolTZbyo=
-github.com/golangci/golangci-lint v1.46.2/go.mod h1:3DkdHnxn9eoTTrpT2gB0TEv8KSziuoqe9FitgQLHvAY=
+github.com/golangci/golangci-lint v1.47.2 h1:qvMDVv49Hrx3PSEXZ0bD/yhwSbhsOihQjFYCKieegIw=
+github.com/golangci/golangci-lint v1.47.2/go.mod h1:lpS2pjBZtRyXewUcOY7yUL3K4KfpoWz072yRN8AuhHg=
 github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb h1:Bi7BYmZVg4C+mKGi8LeohcP2GGUl2XJD4xCkJoZSaYc=
 github.com/golangci/gosec v0.0.0-20180901114220-8afd9cbb6cfb/go.mod h1:ON/c2UR0VAAv6ZEAFKhjCLplESSmRFfZcDLASbI1GWo=
 github.com/golangci/misspell v0.3.5 h1:pLzmVdl3VxTOncgzHcvLOKirdvcx/TydsClUQXTehjo=
@@ -410,8 +410,9 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
 github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
+github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
+github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -821,8 +822,8 @@ github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZ
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
 github.com/shirou/gopsutil/v3 v3.21.12/go.mod h1:BToYZVTlSVlfazpDDYFnsVZLaoRG+g8ufT6fPQLdJzA=
-github.com/shirou/gopsutil/v3 v3.22.4 h1:srAQaiX6jX/cYL6q29aE0m8lOskT9CurZ9N61YR3yoI=
-github.com/shirou/gopsutil/v3 v3.22.4/go.mod h1:D01hZJ4pVHPpCTZ3m3T2+wDF2YAGfd+H4ifUguaQzHM=
+github.com/shirou/gopsutil/v3 v3.22.6 h1:FnHOFOh+cYAM0C30P+zysPISzlknLC5Z1G4EAElznfQ=
+github.com/shirou/gopsutil/v3 v3.22.6/go.mod h1:EdIubSnZhbAvBS1yJ7Xi+AShB/hxwLHOMz4MCYz7yMs=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
 github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk=
@@ -852,8 +853,8 @@ github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTd
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
 github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
-github.com/spf13/cobra v1.4.0 h1:y+wJpx64xcgO1V+RcnwW0LEHxTKRi2ZDPSBjWnrg88Q=
-github.com/spf13/cobra v1.4.0/go.mod h1:Wo4iy3BUC+X2Fybo0PDqwJIv3dNRiZLHQymsfxlB84g=
+github.com/spf13/cobra v1.5.0 h1:X+jTBEBqF0bHN+9cSMgmfuvv2VHJ9ezmFNf9Y/XstYU=
+github.com/spf13/cobra v1.5.0/go.mod h1:dWXEIy2H428czQCjInthrTRUg7yKbok+2Qi/yBIJoUM=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -864,6 +865,7 @@ github.com/stathat/consistent v1.0.0 h1:ZFJ1QTRn8npNBKW065raSZ8xfOqhpb8vLOkfp4Cc
 github.com/stathat/consistent v1.0.0/go.mod h1:uajTPbgSygZBJ+V+0mY7meZ8i0XAcZs7AQ6V121XSxw=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
 github.com/stretchr/testify v1.1.4/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
 github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -872,8 +874,9 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df h1:rh3VYpfvzXRbJ90ymx1yfhGl/wq8ac2m/cUbao61kwY=
-github.com/stretchr/testify v1.7.2-0.20220504104629-106ec21d14df/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
 github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/tdakkota/asciicheck v0.1.1 h1:PKzG7JUTUmVspQTDqtkX9eSiLGossXTybutHwTXuO0A=
 github.com/tdakkota/asciicheck v0.1.1/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
@@ -1046,10 +1049,11 @@ golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u0
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/exp v0.0.0-20200513190911-00229845015e/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw=
-golang.org/x/exp v0.0.0-20220428152302-39d4317da171 h1:TfdoLivD44QwvssI9Sv1xwa5DcL5XQr4au4sZ2F2NV4=
-golang.org/x/exp v0.0.0-20220428152302-39d4317da171/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE=
-golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e h1:qyrTQ++p1afMkO4DPEeLGq/3oTsdlvdH4vqZUBWzUKM=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e h1:+WEEuIdZHnUeJJmEUjyYC2gfUMj69yZXw17EnHg/otA=
+golang.org/x/exp v0.0.0-20220722155223-a9213eeb770e/go.mod h1:Kr81I6Kryrl9sr8s2FK3vxD90NdsKWRuOIl2O4CvYbA=
 golang.org/x/exp/typeparams v0.0.0-20220218215828-6cf2b201936e/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d h1:+W8Qf4iJtMGKkyAygcKohjxTk4JPsL9DpzApJ22m5Ic=
+golang.org/x/exp/typeparams v0.0.0-20220613132600-b0d781184e0d/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
 golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
@@ -1137,8 +1141,8 @@ golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
 golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5 h1:bRb386wvrE+oBNdF1d/Xh9mQrfQ4ecYhW5qJ5GvTGT4=
-golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b h1:PxfKdU9lEEDYjdIzOtC4qFWgkU2rGHdKlKowJSMN9h0=
+golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1170,8 +1174,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=
-golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4 h1:uVc8UZUe6tr40fFVnUP5Oj+veunVezqYl9z7DYw9xzw=
+golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -1261,8 +1265,9 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664 h1:wEZYwx+kK+KlZ0hpvP2Ls1Xr4+RWnlzGFwPP0aiDjIU=
-golang.org/x/sys v0.0.0-20220622161953-175b2fd9d664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
+golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -1352,8 +1357,8 @@ golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E=
 golang.org/x/tools v0.1.11-0.20220513221640-090b14e8501f/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
-golang.org/x/tools v0.1.11 h1:loJ25fNOEhSXfHrpoGj91eCUThwdNX6u24rO1xnNteY=
-golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4=
+golang.org/x/tools v0.1.12 h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU=
+golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1579,8 +1584,9 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
 gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
 gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
 gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
 gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 honnef.co/go/tools v0.3.3 h1:oDx7VAwstgpYpb3wv0oxiZlxY+foCpRAwY7Vk6XpAgA=
 honnef.co/go/tools v0.3.3/go.mod h1:jzwdWgg7Jdq75wlfblQxO4neNaFFSvgc1tD5Wv8U0Yw=
 k8s.io/api v0.0.0-20190409021203-6e4e0e4f393b/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA=
diff --git a/infoschema/cache_test.go b/infoschema/cache_test.go
index c229df8c8747c..83506bc4794d8 100644
--- a/infoschema/cache_test.go
+++ b/infoschema/cache_test.go
@@ -93,7 +93,6 @@ func TestInsert(t *testing.T) {
 	require.Nil(t, ic.GetBySnapshotTS(2))
 	require.Nil(t, ic.GetBySnapshotTS(5))
 	require.Equal(t, is6, ic.GetBySnapshotTS(10))
-
 }
 
 func TestGetByVersion(t *testing.T) {
diff --git a/infoschema/cluster_tables_test.go b/infoschema/cluster_tables_test.go
index 551993b2a69d0..5e347480bbb22 100644
--- a/infoschema/cluster_tables_test.go
+++ b/infoschema/cluster_tables_test.go
@@ -178,7 +178,6 @@ func TestTestDataLockWaits(t *testing.T) {
 		"6B657932 3 4 "+digest2.String()+" ",
 		"6B657933 5 6 ",
 		"6B657934 7 8 "))
-
 }
 
 func SubTestDataLockWaitsPrivilege(t *testing.T) {
@@ -215,7 +214,6 @@ func SubTestDataLockWaitsPrivilege(t *testing.T) {
 		Hostname: "localhost",
 	}, nil, nil))
 	_ = tk.MustQuery("select * from information_schema.DATA_LOCK_WAITS")
-
 }
 
 func TestSelectClusterTable(t *testing.T) {
diff --git a/infoschema/tables_test.go b/infoschema/tables_test.go
index 0a7d8187b867a..549652372bd2f 100644
--- a/infoschema/tables_test.go
+++ b/infoschema/tables_test.go
@@ -630,7 +630,6 @@ WHERE table_name = 'slow_query' and column_name = '` + columnName + `'`).
 
 	//check select
 	tk.MustQuery(`select ` + columnName + ` from information_schema.slow_query`).Check(testkit.Rows("1"))
-
 }
 
 func TestReloadDropDatabase(t *testing.T) {
diff --git a/parser/ast/ddl.go b/parser/ast/ddl.go
index 52d489a89cf85..c93d09ae254f2 100644
--- a/parser/ast/ddl.go
+++ b/parser/ast/ddl.go
@@ -3311,7 +3311,6 @@ func (n *AlterTableStmt) HaveOnlyPlacementOptions() bool {
 	for _, n := range n.Specs {
 		if n.Tp != AlterTablePartitionOptions {
 			return false
-
 		}
 		if !n.IsAllPlacementRule() {
 			return false
diff --git a/parser/ast/dml.go b/parser/ast/dml.go
index 93ee525d30bf5..99a76f2bc4409 100644
--- a/parser/ast/dml.go
+++ b/parser/ast/dml.go
@@ -542,7 +542,6 @@ func (n *TableSource) Restore(ctx *format.RestoreCtx) error {
 			if err := tn.AsOf.Restore(ctx); err != nil {
 				return errors.Annotate(err, "An error occurred while restore TableSource.AsOf")
 			}
-
 		}
 		if err := tn.restoreIndexHints(ctx); err != nil {
 			return errors.Annotate(err, "An error occurred while restore TableSource.Source.(*TableName).IndexHints")
@@ -979,7 +978,6 @@ func (s *TableSample) Restore(ctx *format.RestoreCtx) error {
 		ctx.WriteKeyWord(" PERCENT")
 	case SampleClauseUnitTypeRow:
 		ctx.WriteKeyWord(" ROWS")
-
 	}
 	ctx.WritePlain(")")
 	if s.RepeatableSeed != nil {
@@ -1181,7 +1179,7 @@ func (n *WithClause) Accept(v Visitor) (Node, bool) {
 // Restore implements Node interface.
 func (n *SelectStmt) Restore(ctx *format.RestoreCtx) error {
 	if n.WithBeforeBraces {
-		defer ctx.RestoreCTEFunc()()
+		defer ctx.RestoreCTEFunc()() //nolint: all_revive
 		err := n.With.Restore(ctx)
 		if err != nil {
 			return err
@@ -1194,7 +1192,7 @@ func (n *SelectStmt) Restore(ctx *format.RestoreCtx) error {
 		}()
 	}
 	if !n.WithBeforeBraces && n.With != nil {
-		defer ctx.RestoreCTEFunc()()
+		defer ctx.RestoreCTEFunc()() //nolint: all_revive
 		err := n.With.Restore(ctx)
 		if err != nil {
 			return err
@@ -1530,7 +1528,7 @@ type SetOprSelectList struct {
 // Restore implements Node interface.
 func (n *SetOprSelectList) Restore(ctx *format.RestoreCtx) error {
 	if n.With != nil {
-		defer ctx.RestoreCTEFunc()()
+		defer ctx.RestoreCTEFunc()() //nolint: all_revive
 		if err := n.With.Restore(ctx); err != nil {
 			return errors.Annotate(err, "An error occurred while restore SetOprSelectList.With")
 		}
@@ -1631,7 +1629,7 @@ func (*SetOprStmt) resultSet() {}
 // Restore implements Node interface.
 func (n *SetOprStmt) Restore(ctx *format.RestoreCtx) error {
 	if n.With != nil {
-		defer ctx.RestoreCTEFunc()()
+		defer ctx.RestoreCTEFunc()() //nolint: all_revive
 		if err := n.With.Restore(ctx); err != nil {
 			return errors.Annotate(err, "An error occurred while restore UnionStmt.With")
 		}
@@ -2213,7 +2211,7 @@ type DeleteStmt struct {
 // Restore implements Node interface.
 func (n *DeleteStmt) Restore(ctx *format.RestoreCtx) error {
 	if n.With != nil {
-		defer ctx.RestoreCTEFunc()()
+		defer ctx.RestoreCTEFunc()() //nolint: all_revive
 		err := n.With.Restore(ctx)
 		if err != nil {
 			return err
@@ -2438,7 +2436,7 @@ type UpdateStmt struct {
 // Restore implements Node interface.
 func (n *UpdateStmt) Restore(ctx *format.RestoreCtx) error {
 	if n.With != nil {
-		defer ctx.RestoreCTEFunc()()
+		defer ctx.RestoreCTEFunc()() //nolint: all_revive
 		err := n.With.Restore(ctx)
 		if err != nil {
 			return err
@@ -3418,7 +3416,6 @@ func (n *SplitRegionStmt) Restore(ctx *format.RestoreCtx) error {
 		}
 		if n.SplitSyntaxOpt.HasPartition {
 			ctx.WriteKeyWord("PARTITION ")
-
 		}
 	}
 	ctx.WriteKeyWord("TABLE ")
diff --git a/parser/ast/expressions.go b/parser/ast/expressions.go
index 66f40eb205952..270c46218af61 100644
--- a/parser/ast/expressions.go
+++ b/parser/ast/expressions.go
@@ -909,7 +909,6 @@ func (n *PatternLikeExpr) Restore(ctx *format.RestoreCtx) error {
 		if escape != "\\" {
 			ctx.WriteKeyWord(" ESCAPE ")
 			ctx.WriteString(escape)
-
 		}
 	}
 	return nil
diff --git a/parser/ast/expressions_test.go b/parser/ast/expressions_test.go
index 27dd16487d2e9..478db9ce0d78a 100644
--- a/parser/ast/expressions_test.go
+++ b/parser/ast/expressions_test.go
@@ -353,7 +353,6 @@ func TestPositionExprRestore(t *testing.T) {
 		return node.(*SelectStmt).OrderBy.Items[0]
 	}
 	runNodeRestoreTest(t, testCases, "select * from t order by %s", extractNodeFunc)
-
 }
 
 func TestExistsSubqueryExprRestore(t *testing.T) {
diff --git a/parser/ast/misc.go b/parser/ast/misc.go
index b5e05e99f2bed..1ebbea01334e2 100644
--- a/parser/ast/misc.go
+++ b/parser/ast/misc.go
@@ -1054,11 +1054,11 @@ func (n *SetConfigStmt) Accept(v Visitor) (Node, bool) {
 		return v.Leave(newNode)
 	}
 	n = newNode.(*SetConfigStmt)
-	if node, ok := n.Value.Accept(v); !ok {
+	node, ok := n.Value.Accept(v)
+	if !ok {
 		return n, false
-	} else {
-		n.Value = node.(ExprNode)
 	}
+	n.Value = node.(ExprNode)
 	return v.Leave(n)
 }
diff --git a/parser/ast/misc_test.go b/parser/ast/misc_test.go
index b7a5cac8bee02..2356fdb3beb4c 100644
--- a/parser/ast/misc_test.go
+++ b/parser/ast/misc_test.go
@@ -338,7 +338,6 @@ func TestBRIESecureText(t *testing.T) {
 		n, ok := node.(ast.SensitiveStmtNode)
 		require.True(t, ok, comment)
 		require.Regexp(t, tc.secured, n.SecureText(), comment)
-
 	}
 }
diff --git a/parser/ast/util.go b/parser/ast/util.go
index 82f7285728221..6caae36a203bf 100644
--- a/parser/ast/util.go
+++ b/parser/ast/util.go
@@ -70,8 +70,7 @@ type readOnlyChecker struct {
 
 // Enter implements Visitor interface.
 func (checker *readOnlyChecker) Enter(in Node) (out Node, skipChildren bool) {
-	switch node := in.(type) {
-	case *VariableExpr:
+	if node, ok := in.(*VariableExpr); ok {
 		// like func rewriteVariable(), this stands for SetVar.
if !node.IsSystem && node.Value != nil { checker.readOnly = false diff --git a/parser/charset/charset_test.go b/parser/charset/charset_test.go index e09ad6b3c5bba..6de594c68c54d 100644 --- a/parser/charset/charset_test.go +++ b/parser/charset/charset_test.go @@ -84,7 +84,7 @@ func TestGetDefaultCollation(t *testing.T) { if collate.IsDefault { if desc, ok := CharacterSetInfos[collate.CharsetName]; ok { require.Equal(t, desc.DefaultCollation, collate.Name) - charsetNum += 1 + charsetNum++ } } } diff --git a/parser/goyacc/format_yacc.go b/parser/goyacc/format_yacc.go index 7ccff8da30900..ae752b51b9287 100644 --- a/parser/goyacc/format_yacc.go +++ b/parser/goyacc/format_yacc.go @@ -253,17 +253,16 @@ func printSingleName(f format.Formatter, name *parser.Name, maxCharLength int) e if strLit != nil && strLit.Token != nil { _, err := f.Format("%-*s %s\n", maxCharLength, name.Token.Val, strLit.Token.Val) return err - } else { - _, err := f.Format("%s\n", name.Token.Val) - return err } + _, err := f.Format("%s\n", name.Token.Val) + return err } type NameArr []*parser.Name -func (ns NameArr) span(pred func(*parser.Name) bool) (NameArr, NameArr) { - first := ns.takeWhile(pred) - second := ns[len(first):] +func (ns NameArr) span(pred func(*parser.Name) bool) (first NameArr, second NameArr) { + first = ns.takeWhile(pred) + second = ns[len(first):] return first, second } @@ -530,7 +529,7 @@ func (n *NotNilAssert) and(target interface{}) *NotNilAssert { if target == nil { n.err = errors.Errorf("encounter nil, index: %d", n.idx) } - n.idx += 1 + n.idx++ return n } diff --git a/parser/parser_test.go b/parser/parser_test.go index 57550a440a8cf..c2e949405dced 100644 --- a/parser/parser_test.go +++ b/parser/parser_test.go @@ -398,7 +398,6 @@ func RunRestoreTest(t *testing.T, sourceSQLs, expectSQLs string, enableWindowFun restoreSQLs += "; " } restoreSQLs += restoreSQL - } require.Equalf(t, expectSQLs, restoreSQLs, "restore %v; expect %v", restoreSQLs, expectSQLs) } @@ -5891,7 +5890,6 @@ func TestVisitFrameBound(t *testing.T) { require.Equal(t, tbl.exprRc, checker.exprRc) require.Equal(t, tbl.unit, checker.unit) } - } func TestFieldText(t *testing.T) { @@ -6156,7 +6154,7 @@ func (checker *nodeTextCleaner) Enter(in ast.Node) (out ast.Node, skipChildren b col.Tp.SetCollate(strings.ToUpper(col.Tp.GetCollate())) for i, option := range col.Options { - if option.Tp == 0 && option.Expr == nil && option.Stored == false && option.Refer == nil { + if option.Tp == 0 && option.Expr == nil && !option.Stored && option.Refer == nil { col.Options = append(col.Options[:i], col.Options[i+1:]...) 
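// Editorial sketch, not part of the patch: the charset_test.go and parser_test.go
// hunks above are mechanical revive fixes: increment-decrement (x += 1 becomes x++)
// and bool-literal-in-expr (x == false becomes !x), while util.go collapses a
// one-case type switch into a plain type assertion. A minimal illustration of all
// three rewrites, with names that are illustrative only:
package main

import "fmt"

// count returns how many entries are the boolean false.
func count(values []interface{}) int {
	n := 0
	for _, v := range values {
		// Type assertion instead of a one-case type switch; !b instead of b == false.
		if b, ok := v.(bool); ok && !b {
			n++ // increment-decrement: n++ rather than n += 1
		}
	}
	return n
}

func main() { fmt.Println(count([]interface{}{false, true, "x", false})) }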
} } diff --git a/parser/types/field_type_test.go b/parser/types/field_type_test.go index 6310dbb102bc9..5d458cb38b661 100644 --- a/parser/types/field_type_test.go +++ b/parser/types/field_type_test.go @@ -270,7 +270,6 @@ func TestEnumSetFlen(t *testing.T) { require.NoError(t, err) col := stmt.(*ast.CreateTableStmt).Cols[0] require.Equal(t, ca.ex, col.Tp.GetFlen()) - } } diff --git a/planner/cascades/integration_test.go b/planner/cascades/integration_test.go index 426a4d467fa3b..b41a992f156d7 100644 --- a/planner/cascades/integration_test.go +++ b/planner/cascades/integration_test.go @@ -55,7 +55,7 @@ func TestPKIsHandleRangeScan(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -85,7 +85,7 @@ func TestIndexScan(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -129,7 +129,7 @@ func TestSort(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -161,7 +161,7 @@ func TestAggregation(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -181,7 +181,7 @@ func TestPushdownDistinctEnable(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) vars := []string{ fmt.Sprintf("set @@session.%s = 1", variable.TiDBOptDistinctAggPushDown), } @@ -196,7 +196,7 @@ func TestPushdownDistinctDisable(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) vars := []string{ fmt.Sprintf("set @@session.%s = 0", variable.TiDBOptDistinctAggPushDown), } @@ -253,7 +253,7 @@ func TestSimplePlans(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -288,7 +288,7 @@ func TestJoin(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -319,7 +319,7 @@ func TestApply(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -345,7 +345,7 @@ 
func TestMemTableScan(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -374,7 +374,7 @@ func TestTopN(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -408,7 +408,7 @@ func TestCascadePlannerHashedPartTable(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql @@ -439,7 +439,7 @@ func TestInlineProjection(t *testing.T) { Result []string } integrationSuiteData := cascades.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { testdata.OnRecord(func() { output[i].SQL = sql diff --git a/planner/cascades/stringer_test.go b/planner/cascades/stringer_test.go index bdc4abb8486b6..2c28c4cf8e52a 100644 --- a/planner/cascades/stringer_test.go +++ b/planner/cascades/stringer_test.go @@ -48,7 +48,7 @@ func TestGroupStringer(t *testing.T) { SQL string Result []string } - stringerSuiteData.GetTestCases(t, &input, &output) + stringerSuiteData.LoadTestCases(t, &input, &output) p := parser.New() ctx := plannercore.MockContext() diff --git a/planner/cascades/transformation_rules_test.go b/planner/cascades/transformation_rules_test.go index cfd25f42c1c38..a57100a253d92 100644 --- a/planner/cascades/transformation_rules_test.go +++ b/planner/cascades/transformation_rules_test.go @@ -80,7 +80,7 @@ func TestAggPushDownGather(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) p := parser.New() ctx := plannercore.MockContext() @@ -150,7 +150,7 @@ func TestPredicatePushDown(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -189,7 +189,7 @@ func TestTopNRules(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -209,7 +209,7 @@ func TestProjectionElimination(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -228,7 +228,7 @@ func TestEliminateMaxMin(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -247,7 +247,7 @@ func TestMergeAggregationProjection(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, 
&output) testGroupToString(t, input, output, optimizer) } @@ -273,7 +273,7 @@ func TestMergeAdjacentTopN(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -293,7 +293,7 @@ func TestMergeAdjacentLimit(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -312,7 +312,7 @@ func TestTransformLimitToTableDual(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -331,7 +331,7 @@ func TestPostTransformationRules(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -356,7 +356,7 @@ func TestPushLimitDownTiKVSingleGather(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -378,7 +378,7 @@ func TestEliminateOuterJoin(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -397,7 +397,7 @@ func TestTransformAggregateCaseToSelection(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -419,7 +419,7 @@ func TestTransformAggToProj(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -439,7 +439,7 @@ func TestDecorrelate(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -465,7 +465,7 @@ func TestInjectProj(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } @@ -488,6 +488,6 @@ func TestMergeAdjacentWindow(t *testing.T) { SQL string Result []string } - transformationRulesSuiteData.GetTestCases(t, &input, &output) + transformationRulesSuiteData.LoadTestCases(t, &input, &output) testGroupToString(t, input, output, optimizer) } diff --git a/planner/core/binary_plan_test.go b/planner/core/binary_plan_test.go index 40abb5c62b493..056972f5917a5 100644 --- a/planner/core/binary_plan_test.go +++ b/planner/core/binary_plan_test.go @@ -109,7 +109,7 @@ func TestBinaryPlanInExplainAndSlowLog(t *testing.T) { BinaryPlan *tipb.ExplainData } planSuiteData := core.GetBinaryPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, test := range input { comment := 
fmt.Sprintf("case:%v sql:%s", i, test) diff --git a/planner/core/cacheable_checker_test.go b/planner/core/cacheable_checker_test.go index d4802db30f183..c945f84d8264e 100644 --- a/planner/core/cacheable_checker_test.go +++ b/planner/core/cacheable_checker_test.go @@ -244,5 +244,4 @@ func TestCacheable(t *testing.T) { }, } require.True(t, core.Cacheable(stmt, is)) - } diff --git a/planner/core/cbo_test.go b/planner/core/cbo_test.go index 2aeeba0e92072..fce856f54c78f 100644 --- a/planner/core/cbo_test.go +++ b/planner/core/cbo_test.go @@ -101,7 +101,7 @@ func TestCBOWithoutAnalyze(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { plan := testKit.MustQuery(sql) testdata.OnRecord(func() { @@ -125,7 +125,7 @@ func TestStraightJoin(t *testing.T) { var input []string var output [][]string analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i] = testdata.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) @@ -153,7 +153,7 @@ func TestTableDual(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { plan := testKit.MustQuery(sql) testdata.OnRecord(func() { @@ -190,7 +190,7 @@ func TestEstimation(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { plan := testKit.MustQuery(sql) testdata.OnRecord(func() { @@ -243,7 +243,7 @@ func TestIndexRead(t *testing.T) { ctx := testKit.Session() var input, output []string analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { stmts, err := session.Parse(ctx, tt) @@ -274,7 +274,7 @@ func TestEmptyTable(t *testing.T) { testKit.MustExec("analyze table t, t1") var input, output []string analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { ctx := testKit.Session() stmts, err := session.Parse(ctx, tt) @@ -339,7 +339,7 @@ func TestAnalyze(t *testing.T) { var input, output []string analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { ctx := testKit.Session() @@ -392,7 +392,7 @@ func TestOutdatedAnalyze(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testKit.Session().GetSessionVars().SetEnablePseudoForOutdatedStats(tt.EnablePseudoForOutdatedStats) statistics.RatioOfPseudoEstimate.Store(tt.RatioOfPseudoEstimate) @@ -419,7 +419,7 @@ func TestNullCount(t *testing.T) { var input []string var output [][]string analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i := 0; i < 2; i++ { testdata.OnRecord(func() { output[i] 
= testdata.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) @@ -451,7 +451,7 @@ func TestCorrelatedEstimation(t *testing.T) { output [][]string ) analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { rs := tk.MustQuery(tt) testdata.OnRecord(func() { @@ -483,7 +483,7 @@ func TestInconsistentEstimation(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, sql := range input { plan := tk.MustQuery(sql) testdata.OnRecord(func() { @@ -624,7 +624,7 @@ func TestIssue9562(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { for j, tt := range ts { if j != len(ts)-1 { @@ -689,7 +689,7 @@ func TestLimitCrossEstimation(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { for j, tt := range ts { if j != len(ts)-1 { @@ -726,7 +726,7 @@ func TestLowSelIndexGreedySearch(t *testing.T) { // - index `idx2` runs much faster than `idx4` experimentally; // - estimated row count of IndexLookUp should be 0; analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -762,7 +762,7 @@ func TestTiFlashCostModel(t *testing.T) { var input, output [][]string analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { for j, tt := range ts { if j != len(ts)-1 { @@ -795,7 +795,7 @@ func TestIndexEqualUnknown(t *testing.T) { Plan []string } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -824,7 +824,7 @@ func TestLimitIndexEstimation(t *testing.T) { } analyzeSuiteData := core.GetAnalyzeSuiteData() - analyzeSuiteData.GetTestCases(t, &input, &output) + analyzeSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/planner/core/enforce_mpp_test.go b/planner/core/enforce_mpp_test.go index 5b39a08bdd8cb..3dc1453fe05c5 100644 --- a/planner/core/enforce_mpp_test.go +++ b/planner/core/enforce_mpp_test.go @@ -120,7 +120,7 @@ func TestEnforceMPP(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) filterWarnings := func(originalWarnings []stmtctx.SQLWarn) []stmtctx.SQLWarn { warnings := make([]stmtctx.SQLWarn, 0, 4) for _, warning := range originalWarnings { @@ -169,7 +169,7 @@ func TestEnforceMPPWarning1(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ 
-253,7 +253,7 @@ func TestEnforceMPPWarning2(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -305,7 +305,7 @@ func TestEnforceMPPWarning3(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -368,7 +368,7 @@ func TestEnforceMPPWarning4(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -422,7 +422,7 @@ func TestMPP2PhaseAggPushDown(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -474,7 +474,7 @@ func TestMPPSkewedGroupDistinctRewrite(t *testing.T) { Warn []string } enforceMPPSuiteData := plannercore.GetEnforceMPPSuiteData() - enforceMPPSuiteData.GetTestCases(t, &input, &output) + enforceMPPSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/planner/core/exhaust_physical_plans.go b/planner/core/exhaust_physical_plans.go index 4641dfbc49c38..18d8f6d2bd54a 100644 --- a/planner/core/exhaust_physical_plans.go +++ b/planner/core/exhaust_physical_plans.go @@ -1895,7 +1895,6 @@ func (p *LogicalJoin) tryToGetMppHashJoin(prop *property.PhysicalProperty, useBC p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because `Cartesian Product` is only supported by broadcast join, check value and documents of variable `tidb_opt_broadcast_cartesian_join`.") return nil } - } if len(p.LeftConditions) != 0 && p.JoinType != LeftOuterJoin { p.SCtx().GetSessionVars().RaiseWarningWhenMPPEnforced("MPP mode may be blocked because there is a join that is not `left join` but has left conditions, which is not supported by mpp now, see github.com/pingcap/tidb/issues/26090 for more information.") diff --git a/planner/core/expression_rewriter.go b/planner/core/expression_rewriter.go index f4e7556f63e26..7a5212ee77007 100644 --- a/planner/core/expression_rewriter.go +++ b/planner/core/expression_rewriter.go @@ -2167,7 +2167,7 @@ func decodeRecordKey(key []byte, tableID int64, tbl table.Table, loc *time.Locat } cols := make(map[int64]*types.FieldType, len(tblInfo.Columns)) for _, col := range tblInfo.Columns { - cols[col.ID] = &col.FieldType + cols[col.ID] = &(col.FieldType) } handleColIDs := make([]int64, 0, len(idxInfo.Columns)) for _, col := range idxInfo.Columns { diff --git a/planner/core/expression_rewriter_test.go b/planner/core/expression_rewriter_test.go index e3d8e0be2bf65..98bf5e0b4e0b8 100644 --- a/planner/core/expression_rewriter_test.go +++ b/planner/core/expression_rewriter_test.go @@ -414,7 +414,7 @@ func TestMultiColInExpression(t *testing.T) { tk.MustExec("set @@tidb_enable_chunk_rpc = on") expressionRewriterSuiteData := plannercore.GetExpressionRewriterSuiteData() - 
expressionRewriterSuiteData.GetTestCases(t, &input, &output) + expressionRewriterSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -441,7 +441,7 @@ func TestBitFuncsReturnType(t *testing.T) { } expressionRewriterSuiteData := plannercore.GetExpressionRewriterSuiteData() - expressionRewriterSuiteData.GetTestCases(t, &input, &output) + expressionRewriterSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { tk.MustQuery("explain format = 'brief' " + tt).Check(testkit.Rows(output[i].Plan...)) } diff --git a/planner/core/flat_plan_test.go b/planner/core/flat_plan_test.go index 574b230efdb67..4dd13e19090ad 100644 --- a/planner/core/flat_plan_test.go +++ b/planner/core/flat_plan_test.go @@ -76,7 +76,7 @@ func TestFlatPhysicalPlan(t *testing.T) { CTEs [][]*FlatPhysicalOperatorForTest } planSuiteData := core.GetFlatPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) diff --git a/planner/core/indexmerge_test.go b/planner/core/indexmerge_test.go index 28118728b41e7..f109b85aaee18 100644 --- a/planner/core/indexmerge_test.go +++ b/planner/core/indexmerge_test.go @@ -59,7 +59,7 @@ func getIndexMergePathDigest(paths []*util.AccessPath, startIndex int) string { func TestIndexMergePathGeneration(t *testing.T) { var input, output []string - indexMergeSuiteData.GetTestCases(t, &input, &output) + indexMergeSuiteData.LoadTestCases(t, &input, &output) ctx := context.TODO() sctx := MockContext() is := infoschema.MockInfoSchema([]*model.TableInfo{MockSignedTable(), MockView()}) diff --git a/planner/core/integration_partition_test.go b/planner/core/integration_partition_test.go index be088c7023742..880a4809149d9 100644 --- a/planner/core/integration_partition_test.go +++ b/planner/core/integration_partition_test.go @@ -51,7 +51,7 @@ func TestListPartitionPushDown(t *testing.T) { Plan []string } integrationPartitionSuiteData := core.GetIntegrationPartitionSuiteData() - integrationPartitionSuiteData.GetTestCases(t, &input, &output) + integrationPartitionSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -93,7 +93,7 @@ func TestListColVariousTypes(t *testing.T) { Results []string } integrationPartitionSuiteData := core.GetIntegrationPartitionSuiteData() - integrationPartitionSuiteData.GetTestCases(t, &input, &output) + integrationPartitionSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -130,7 +130,7 @@ func TestListPartitionPruning(t *testing.T) { StaticPlan []string } integrationPartitionSuiteData := core.GetIntegrationPartitionSuiteData() - integrationPartitionSuiteData.GetTestCases(t, &input, &output) + integrationPartitionSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -162,7 +162,7 @@ func TestListPartitionFunctions(t *testing.T) { Results []string } integrationPartitionSuiteData := core.GetIntegrationPartitionSuiteData() - integrationPartitionSuiteData.GetTestCases(t, &input, &output) + integrationPartitionSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/planner/core/integration_test.go b/planner/core/integration_test.go index f096ff151123e..e4534dabf78f5 
100644 --- a/planner/core/integration_test.go +++ b/planner/core/integration_test.go @@ -184,7 +184,7 @@ func TestPushLimitDownIndexLookUpReader(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -210,7 +210,7 @@ func TestAggColumnPrune(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -233,7 +233,7 @@ func TestIsFromUnixtimeNullRejective(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -374,7 +374,7 @@ func TestSimplifyOuterJoinWithCast(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -446,7 +446,7 @@ func TestSelPushDownTiFlash(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -503,7 +503,7 @@ func TestVerboseExplain(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -544,7 +544,7 @@ func TestPushDownToTiFlashWithKeepOrder(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -586,7 +586,7 @@ func TestPushDownToTiFlashWithKeepOrderInFastMode(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -642,7 +642,7 @@ func TestMPPJoin(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -684,7 +684,7 @@ func TestMPPLeftSemiJoin(t *testing.T) { Warn []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -740,7 +740,7 @@ func TestMPPOuterJoinBuildSideForBroadcastJoin(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + 
integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -787,7 +787,7 @@ func TestMPPOuterJoinBuildSideForShuffleJoinWithFixedBuildSide(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -834,7 +834,7 @@ func TestMPPOuterJoinBuildSideForShuffleJoin(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -897,7 +897,7 @@ func TestMPPShuffledJoin(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -949,7 +949,7 @@ func TestMPPJoinWithCanNotFoundColumnInSchemaColumnsError(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -996,7 +996,7 @@ func TestJoinNotSupportedByTiFlash(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1008,7 +1008,7 @@ func TestJoinNotSupportedByTiFlash(t *testing.T) { tk.MustExec("set @@session.tidb_broadcast_join_threshold_size = 1") tk.MustExec("set @@session.tidb_broadcast_join_threshold_count = 1") - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1058,7 +1058,7 @@ func TestMPPWithHashExchangeUnderNewCollation(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1101,7 +1101,7 @@ func TestMPPWithBroadcastExchangeUnderNewCollation(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1184,7 +1184,7 @@ func TestMPPAvgRewrite(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1301,7 +1301,7 @@ func TestReadFromStorageHint(t *testing.T) { Warn []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1343,7 
+1343,7 @@ func TestReadFromStorageHintAndIsolationRead(t *testing.T) { Warn []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { tk.Session().GetSessionVars().StmtCtx.SetWarnings(nil) testdata.OnRecord(func() { @@ -1385,7 +1385,7 @@ func TestIsolationReadTiFlashNotChoosePointGet(t *testing.T) { Result []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1424,7 +1424,7 @@ func TestIsolationReadTiFlashUseIndexHint(t *testing.T) { Warn []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1450,7 +1450,7 @@ func TestIsolationReadDoNotFilterSystemDB(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1480,7 +1480,7 @@ func TestPartitionTableStats(t *testing.T) { Result []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1507,7 +1507,7 @@ func TestPartitionPruningForInExpr(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1592,7 +1592,7 @@ func TestMaxMinEliminate(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1636,7 +1636,7 @@ func TestIndexJoinUniqueCompositeIndex(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1661,7 +1661,7 @@ func TestIndexMerge(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1686,7 +1686,7 @@ func TestIndexMergeHint4CNF(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1758,7 +1758,7 @@ func TestIndexJoinTableRange(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + 
integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1793,7 +1793,7 @@ func TestSubqueryWithTopN(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1817,7 +1817,7 @@ func TestIndexHintWarning(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1921,7 +1921,7 @@ func TestApproxPercentile(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -1963,7 +1963,7 @@ func TestHintWithRequiredProperty(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2045,7 +2045,7 @@ func TestHintWithoutTableWarning(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2176,7 +2176,7 @@ func TestIndexJoinInnerIndexNDV(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2222,7 +2222,7 @@ func TestIndexMergeSerial(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2468,7 +2468,7 @@ func TestStreamAggProp(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2523,7 +2523,7 @@ func TestOptimizeHintOnPartitionTable(t *testing.T) { Warn []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2689,7 +2689,7 @@ func TestAccessPathOnClusterIndex(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -2734,7 +2734,7 @@ func TestIndexJoinOnClusteredIndex(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - 
integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -3492,7 +3492,7 @@ partition p2 values less than (10))`) Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -3703,7 +3703,7 @@ func TestIssue20710(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -3803,7 +3803,6 @@ func TestOrderByHavingNotInSelect(t *testing.T) { "[planner:1055]Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'test.t1.v2' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by") tk.MustGetErrMsg("select v2, v1 from (select t1.v1, t2.v2 from ttest t1 join ttest t2) t3 join (select 1, 2) t2 group by v1", "[planner:1055]Expression #1 of SELECT list is not in GROUP BY clause and contains nonaggregated column 'test.t3.v2' which is not functionally dependent on columns in GROUP BY clause; this is incompatible with sql_mode=only_full_group_by") - } func TestUpdateSetDefault(t *testing.T) { @@ -4079,7 +4078,7 @@ func TestInvalidHint(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) warning := "show warnings;" for i, tt := range input { testdata.OnRecord(func() { @@ -4138,7 +4137,7 @@ func TestConvertRangeToPoint(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4199,7 +4198,7 @@ func TestIssue22105(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4325,7 +4324,7 @@ func TestPushDownProjectionForTiKV(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4366,7 +4365,7 @@ func TestPushDownProjectionForTiFlashCoprocessor(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4407,7 +4406,7 @@ func TestPushDownProjectionForTiFlash(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4449,7 +4448,7 @@ func 
TestPushDownSelectionForMPP(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4491,7 +4490,7 @@ func TestPushDownProjectionForMPP(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4519,7 +4518,7 @@ func TestReorderSimplifiedOuterJoins(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4545,7 +4544,7 @@ func TestIssue23887(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4626,7 +4625,7 @@ func TestPushDownAggForMPP(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4667,7 +4666,7 @@ func TestMppUnionAll(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4676,7 +4675,6 @@ func TestMppUnionAll(t *testing.T) { res := tk.MustQuery(tt) res.Check(testkit.Rows(output[i].Plan...)) } - } func TestMppJoinDecimal(t *testing.T) { @@ -4715,7 +4713,7 @@ func TestMppJoinDecimal(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4757,7 +4755,7 @@ func TestMppAggTopNWithJoin(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4782,7 +4780,7 @@ func TestLimitIndexLookUpKeepOrder(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4807,7 +4805,7 @@ func TestDecorrelateInnerJoinInSubquery(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -4835,7 +4833,7 @@ func TestDecorrelateLimitInSubquery(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - 
integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5008,7 +5006,7 @@ func TestMultiColMaxOneRow(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5106,7 +5104,7 @@ func TestIssue24095(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5239,7 +5237,7 @@ func TestSequenceAsDataSource(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5331,7 +5329,7 @@ func TestMergeContinuousSelections(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5370,7 +5368,6 @@ func TestSelectIgnoreTemporaryTableInView(t *testing.T) { tk.MustQuery("select * from v3").Check(testkit.Rows("1 2", "3 4")) tk.MustQuery("select * from v4").Check(testkit.Rows("3 4 3 5")) tk.MustQuery("select * from v5").Check(testkit.Rows("1 2", "3 4")) - } // TestIsMatchProp is used to test https://github.com/pingcap/tidb/issues/26017. 
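// Editorial sketch, not part of the patch: a large share of the hunks in this
// patch, including the TestSelectIgnoreTemporaryTableInView change just above,
// delete a single blank line left before a closing brace. That is revive's
// empty-lines rule: a block should not begin or end with padding. Illustrative:
package main

import "fmt"

func greet(name string) {
	fmt.Println("hello", name)
	// No padding line before the closing brace below.
}

func main() { greet("tidb") }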
@@ -5390,7 +5387,7 @@ func TestIsMatchProp(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5718,7 +5715,7 @@ func TestHeuristicIndexSelection(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5748,7 +5745,7 @@ func TestOutputSkylinePruningInfo(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5782,7 +5779,7 @@ func TestPreferRangeScanForUnsignedIntHandle(t *testing.T) { Warnings []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5819,7 +5816,7 @@ func TestIssue27083(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -5959,7 +5956,7 @@ func TestGroupBySetVar(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { res := tk.MustQuery("explain format = 'brief' " + tt) testdata.OnRecord(func() { @@ -6001,7 +5998,7 @@ func TestPushDownGroupConcatToTiFlash(t *testing.T) { Warning []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6159,7 +6156,7 @@ func TestRejectSortForMPP(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6202,7 +6199,7 @@ func TestRegardNULLAsPoint(t *testing.T) { Result []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6327,7 +6324,7 @@ func TestIssue30200(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6367,7 +6364,6 @@ func TestIssue30271(t *testing.T) { tk.MustExec("insert into t values ('b', 'a', '1'), ('b', 'A', '2'), ('c', 'a', '3');") tk.MustExec("set names utf8mb4 collate utf8mb4_general_ci;") tk.MustQuery("select * from t where (a>'a' and b='a') or (b 
= 'A' and a < 'd') order by a,c;").Check(testkit.Rows("b a 1", "b A 2", "c a 3")) - } func TestIssue30804(t *testing.T) { @@ -6446,7 +6442,7 @@ func TestIndexMergeWithCorrelatedColumns(t *testing.T) { Res []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6456,7 +6452,6 @@ func TestIndexMergeWithCorrelatedColumns(t *testing.T) { tk.MustQuery("explain format=brief " + tt).Check(testkit.Rows(output[i].Plan...)) tk.MustQuery(tt).Check(testkit.Rows(output[i].Res...)) } - } func TestIssue20510(t *testing.T) { @@ -6660,7 +6655,7 @@ func TestIssue31240(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6722,7 +6717,7 @@ func TestIssue32632(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6760,7 +6755,7 @@ func TestTiFlashPartitionTableScan(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -6792,7 +6787,7 @@ func TestTiFlashFineGrainedShuffle(t *testing.T) { Plan []string } integrationSuiteData := core.GetIntegrationSuiteData() - integrationSuiteData.GetTestCases(t, &input, &output) + integrationSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/planner/core/logical_plan_builder.go b/planner/core/logical_plan_builder.go index 4ccb19a377535..904470f4f7b61 100644 --- a/planner/core/logical_plan_builder.go +++ b/planner/core/logical_plan_builder.go @@ -4204,6 +4204,7 @@ func (b *PlanBuilder) tryBuildCTE(ctx context.Context, tn *ast.TableName, asName b.outerCTEs = b.outerCTEs[:i] o := b.buildingCTE b.buildingCTE = false + //nolint:all_revive,revive defer func() { b.outerCTEs = append(b.outerCTEs, saveCte...) 
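// Editorial sketch, not part of the patch: the //nolint:all_revive comments the
// patch adds, as in the logical_plan_builder.go hunk above and the dml.go Restore
// methods earlier, appear to suppress revive on the defer f()() idiom, where the
// deferred expression is itself produced by a call. The pattern is deliberate:
// the outer call runs immediately (entering a scope) and only the returned
// function is deferred to run at exit, so the patch silences the warning rather
// than refactoring. Hypothetical names below:
package main

import "fmt"

// enter runs immediately and hands back the matching exit step for defer.
func enter(name string) func() {
	fmt.Println("enter", name)
	return func() { fmt.Println("leave", name) }
}

func work() {
	defer enter("work")() // enter() runs now; only the returned func is deferred
	fmt.Println("working")
}

func main() { work() }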
b.buildingCTE = o diff --git a/planner/core/logical_plan_test.go b/planner/core/logical_plan_test.go index 7b616411b3e2c..085a656dbfdcc 100644 --- a/planner/core/logical_plan_test.go +++ b/planner/core/logical_plan_test.go @@ -81,7 +81,7 @@ func createPlannerSuite() (s *plannerSuite) { func TestPredicatePushDown(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() for ith, ca := range input { @@ -142,7 +142,7 @@ func TestJoinPredicatePushDown(t *testing.T) { Right string } ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -181,7 +181,7 @@ func TestOuterWherePredicatePushDown(t *testing.T) { Right string } ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -226,7 +226,7 @@ func TestSimplifyOuterJoin(t *testing.T) { JoinType string } ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -293,7 +293,7 @@ func TestDeriveNotNullConds(t *testing.T) { Right string } ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -467,7 +467,7 @@ func TestTablePartition(t *testing.T) { } output []string ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -492,7 +492,7 @@ func TestTablePartition(t *testing.T) { func TestSubquery(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -518,7 +518,7 @@ func TestSubquery(t *testing.T) { func TestPlanBuilder(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -545,7 +545,7 @@ func TestPlanBuilder(t *testing.T) { func TestJoinReOrder(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -569,7 +569,7 @@ func TestJoinReOrder(t *testing.T) { func TestEagerAggregation(t *testing.T) { var input []string var output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -598,7 +598,7 @@ func TestColumnPruning(t *testing.T) { input []string output []map[int][]string ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -623,7 +623,7 @@ func TestSortByItemsPruning(t *testing.T) { input []string output [][]string ) - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) testdata.OnRecord(func() { 
output = make([][]string, len(input)) }) @@ -966,7 +966,7 @@ func checkUniqueKeys(p LogicalPlan, t *testing.T, ans map[int][][]string, sql st func TestUniqueKeyInfo(t *testing.T) { var input []string var output []map[int][][]string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) testdata.OnRecord(func() { output = make([]map[int][][]string, len(input)) }) @@ -991,7 +991,7 @@ func TestUniqueKeyInfo(t *testing.T) { func TestAggPrune(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.Background() @@ -1492,7 +1492,7 @@ func TestUnion(t *testing.T) { Best string Err bool } - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.TODO() for i, tt := range input { @@ -1525,7 +1525,7 @@ func TestUnion(t *testing.T) { func TestTopNPushDown(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.TODO() for i, tt := range input { @@ -1601,7 +1601,7 @@ func TestNameResolver(t *testing.T) { func TestOuterJoinEliminator(t *testing.T) { var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s := createPlannerSuite() ctx := context.TODO() @@ -1673,7 +1673,7 @@ func TestWindowFunction(t *testing.T) { s.optimizeVars = nil }() var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s.doTestWindowFunction(t, input, output) } @@ -1688,7 +1688,7 @@ func TestWindowParallelFunction(t *testing.T) { s.optimizeVars = nil }() var input, output []string - planSuiteUnexportedData.GetTestCases(t, &input, &output) + planSuiteUnexportedData.LoadTestCases(t, &input, &output) s.doTestWindowFunction(t, input, output) } diff --git a/planner/core/optimizer_test.go b/planner/core/optimizer_test.go index dd8a41bbab1f3..6cf80c57fa5ec 100644 --- a/planner/core/optimizer_test.go +++ b/planner/core/optimizer_test.go @@ -69,7 +69,6 @@ func testJoinKeyTypeConvert(t *testing.T, leftType, rightType, retType *types.Fi require.Equal(t, retType.GetFlag(), cType.GetFlag()) require.Equal(t, lConvert, lCon) require.Equal(t, rConvert, rCon) - } func TestMPPJoinKeyTypeConvert(t *testing.T) { diff --git a/planner/core/partition_pruner_test.go b/planner/core/partition_pruner_test.go index 387396f25235c..4549791cf4851 100644 --- a/planner/core/partition_pruner_test.go +++ b/planner/core/partition_pruner_test.go @@ -53,7 +53,7 @@ func TestHashPartitionPruner(t *testing.T) { Result []string } partitionPrunerData := plannercore.GetPartitionPrunerData() - partitionPrunerData.GetTestCases(t, &input, &output) + partitionPrunerData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt @@ -304,7 +304,7 @@ func TestListPartitionPruner(t *testing.T) { Plan []string } partitionPrunerData := plannercore.GetPartitionPrunerData() - partitionPrunerData.GetTestCases(t, &input, &output) + partitionPrunerData.LoadTestCases(t, &input, &output) valid := false for i, tt := range input { testdata.OnRecord(func() { @@ -375,7 +375,7 @@ func TestListColumnsPartitionPruner(t 
*testing.T) { IndexPlan []string } partitionPrunerData := plannercore.GetPartitionPrunerData() - partitionPrunerData.GetTestCases(t, &input, &output) + partitionPrunerData.LoadTestCases(t, &input, &output) valid := false for i, tt := range input { // Test for table without index. @@ -673,7 +673,7 @@ func TestRangePartitionPredicatePruner(t *testing.T) { Result []string } partitionPrunerData := plannercore.GetPartitionPrunerData() - partitionPrunerData.GetTestCases(t, &input, &output) + partitionPrunerData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/planner/core/physical_plan_test.go b/planner/core/physical_plan_test.go index 16885eecc6d6b..9bd6886af0945 100644 --- a/planner/core/physical_plan_test.go +++ b/planner/core/physical_plan_test.go @@ -54,7 +54,7 @@ func TestDAGPlanBuilderSimpleCase(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -191,7 +191,7 @@ func TestDAGPlanBuilderJoin(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -229,7 +229,7 @@ func TestDAGPlanBuilderSubquery(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -260,7 +260,7 @@ func TestDAGPlanTopN(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -306,7 +306,7 @@ func TestDAGPlanBuilderBasePhysicalPlan(t *testing.T) { Hints string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -348,7 +348,7 @@ func TestDAGPlanBuilderUnion(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -420,7 +420,7 @@ func TestDAGPlanBuilderAgg(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -451,7 +451,7 @@ func TestRefine(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + 
planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -484,7 +484,7 @@ func TestAggEliminator(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, tt := range input { @@ -513,7 +513,7 @@ func TestINMJHint(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -545,7 +545,7 @@ func TestEliminateMaxOneRow(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -616,7 +616,7 @@ func TestIndexJoinUnionScan(t *testing.T) { Plan []string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { tk.MustExec("begin") @@ -654,7 +654,7 @@ func TestMergeJoinUnionScan(t *testing.T) { Plan []string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { tk.MustExec("begin") @@ -738,7 +738,7 @@ func TestSemiJoinToInner(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -766,7 +766,7 @@ func TestUnmatchedTableInHint(t *testing.T) { Warning string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) for i, test := range input { @@ -804,7 +804,7 @@ func TestHintScope(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -840,7 +840,7 @@ func TestJoinHints(t *testing.T) { Hints string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -902,7 +902,7 @@ func TestAggregationHints(t *testing.T) { Warning string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -954,7 +954,7 @@ func TestSemiJoinRewriteHints(t *testing.T) { Warning string } planSuiteData := 
core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -1019,7 +1019,7 @@ func TestAggToCopHint(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() is := domain.GetDomain(tk.Session()).InfoSchema() @@ -1079,7 +1079,7 @@ func TestLimitToCopHint(t *testing.T) { ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { testdata.OnRecord(func() { @@ -1151,7 +1151,7 @@ func TestCTEMergeHint(t *testing.T) { ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { testdata.OnRecord(func() { @@ -1192,7 +1192,7 @@ func TestPushdownDistinctEnable(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) vars := []string{ fmt.Sprintf("set @@session.%s = 1", variable.TiDBOptDistinctAggPushDown), "set session tidb_opt_agg_push_down = 1", @@ -1211,7 +1211,7 @@ func TestPushdownDistinctDisable(t *testing.T) { ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) vars := []string{ fmt.Sprintf("set @@session.%s = 0", variable.TiDBOptDistinctAggPushDown), "set session tidb_opt_agg_push_down = 1", @@ -1229,7 +1229,7 @@ func TestPushdownDistinctEnableAggPushDownDisable(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) vars := []string{ fmt.Sprintf("set @@session.%s = 1", variable.TiDBOptDistinctAggPushDown), "set session tidb_opt_agg_push_down = 0", @@ -1298,7 +1298,7 @@ func TestGroupConcatOrderby(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1389,7 +1389,7 @@ func TestIndexHint(t *testing.T) { Hints string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -1442,7 +1442,7 @@ func TestIndexMergeHint(t *testing.T) { Hints string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -1495,7 +1495,7 @@ func TestQueryBlockHint(t *testing.T) { Hints string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.TODO() p := parser.New() is := infoschema.MockInfoSchema([]*model.TableInfo{core.MockSignedTable(), core.MockUnsignedTable()}) @@ -1542,7 +1542,7 @@ func 
TestInlineProjection(t *testing.T) { } is := domain.GetDomain(tk.Session()).InfoSchema() planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) ctx := context.Background() p := parser.New() @@ -1661,7 +1661,7 @@ func TestIndexJoinHint(t *testing.T) { ctx := context.Background() planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { comment := fmt.Sprintf("case:%v sql: %s", i, tt) stmt, err := p.ParseOneStmt(tt, "", "") @@ -1683,7 +1683,7 @@ func TestDAGPlanBuilderWindow(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) vars := []string{ "set @@session.tidb_window_concurrency = 1", } @@ -1697,7 +1697,7 @@ func TestDAGPlanBuilderWindowParallel(t *testing.T) { Best string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) vars := []string{ "set @@session.tidb_window_concurrency = 4", } @@ -1764,7 +1764,7 @@ func TestNominalSort(t *testing.T) { tk.MustExec("insert into t values(2, 4)") tk.MustExec("insert into t values(3, 5)") planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { testdata.OnRecord(func() { output[i].SQL = ts @@ -1799,7 +1799,7 @@ func TestHintFromDiffDatabase(t *testing.T) { ctx := context.Background() planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { comment := fmt.Sprintf("case:%v sql: %s", i, tt) stmt, err := p.ParseOneStmt(tt, "", "") @@ -1830,7 +1830,7 @@ func TestNthPlanHintWithExplain(t *testing.T) { Plan []string } planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) for i, ts := range input { testdata.OnRecord(func() { output[i].SQL = ts @@ -1856,7 +1856,7 @@ func TestEnumIndex(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1886,7 +1886,7 @@ func TestIssue27233(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1930,7 +1930,7 @@ func TestSelectionPartialPushDown(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -1957,7 +1957,7 @@ func TestIssue28316(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -2002,7 +2002,7 @@ func TestSkewDistinctAgg(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) 
+ planSuiteData.LoadTestCases(t, &input, &output) store, clean := testkit.CreateMockStore(t) defer clean() tk := testkit.NewTestKit(t, store) @@ -2053,7 +2053,7 @@ func TestMPPSinglePartitionType(t *testing.T) { } ) planSuiteData := core.GetPlanSuiteData() - planSuiteData.GetTestCases(t, &input, &output) + planSuiteData.LoadTestCases(t, &input, &output) store, dom, clean := testkit.CreateMockStoreAndDomain(t) defer clean() tk := testkit.NewTestKit(t, store) diff --git a/planner/core/plan_test.go b/planner/core/plan_test.go index 8b3c61f7a2316..8b15febbb6de5 100644 --- a/planner/core/plan_test.go +++ b/planner/core/plan_test.go @@ -68,7 +68,7 @@ func TestPreferRangeScan(t *testing.T) { Plan []string } planNormalizedSuiteData := core.GetPlanNormalizedSuiteData() - planNormalizedSuiteData.GetTestCases(t, &input, &output) + planNormalizedSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { if i == 0 { tk.MustExec("set session tidb_opt_prefer_range_scan=0") @@ -117,7 +117,7 @@ func TestNormalizedPlan(t *testing.T) { Plan []string } planNormalizedSuiteData := core.GetPlanNormalizedSuiteData() - planNormalizedSuiteData.GetTestCases(t, &input, &output) + planNormalizedSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { tk.Session().GetSessionVars().PlanID = 0 tk.MustExec(tt) @@ -163,7 +163,7 @@ func TestNormalizedPlanForDiffStore(t *testing.T) { Plan []string } planNormalizedSuiteData := core.GetPlanNormalizedSuiteData() - planNormalizedSuiteData.GetTestCases(t, &input, &output) + planNormalizedSuiteData.LoadTestCases(t, &input, &output) lastDigest := "" for i, tt := range input { tk.Session().GetSessionVars().PlanID = 0 diff --git a/planner/core/planbuilder.go b/planner/core/planbuilder.go index c82f85a10c2ed..fb6eb9475b033 100644 --- a/planner/core/planbuilder.go +++ b/planner/core/planbuilder.go @@ -1551,7 +1551,6 @@ func (b *PlanBuilder) buildPhysicalIndexLookUpReader(_ context.Context, dbName m ts.schema.Append(commonCols[pkOffset]) ts.HandleIdx = append(ts.HandleIdx, len(ts.Columns)-1) } - } } diff --git a/planner/core/point_get_plan.go b/planner/core/point_get_plan.go index 0fbd9d2866f54..b400144f873bf 100644 --- a/planner/core/point_get_plan.go +++ b/planner/core/point_get_plan.go @@ -740,7 +740,6 @@ func newBatchPointGetPlan( pos2PartitionDefinition[pos] = tmpPartitionDefinition } } - } posArr := make([]int, len(pos2PartitionDefinition)) diff --git a/planner/core/point_get_plan_test.go b/planner/core/point_get_plan_test.go index 508e21004476d..569f15dd9ad0f 100644 --- a/planner/core/point_get_plan_test.go +++ b/planner/core/point_get_plan_test.go @@ -371,7 +371,7 @@ func TestCBOPointGet(t *testing.T) { Res []string } pointGetPlanData := core.GetPointGetPlanData() - pointGetPlanData.GetTestCases(t, &input, &output) + pointGetPlanData.LoadTestCases(t, &input, &output) require.Equal(t, len(input), len(output)) for i, sql := range input { plan := tk.MustQuery("explain format = 'brief' " + sql) @@ -921,7 +921,7 @@ func TestCBOShouldNotUsePointGet(t *testing.T) { } pointGetPlanData := core.GetPointGetPlanData() - pointGetPlanData.GetTestCases(t, &input, &output) + pointGetPlanData.LoadTestCases(t, &input, &output) require.Equal(t, len(input), len(output)) for i, sql := range input { plan := tk.MustQuery("explain format = 'brief' " + sql) diff --git a/planner/core/prepare_test.go b/planner/core/prepare_test.go index 972b6ba3383ab..b0ef84dacf404 100644 --- a/planner/core/prepare_test.go +++ b/planner/core/prepare_test.go @@ -1249,8 +1249,6 @@ func 
TestPrepareCacheForPartition(t *testing.T) { if pruneMode == string(variable.Dynamic) { // When the temporary disabling of prepared plan cache for dynamic partition prune mode is disabled, change this to 1! tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) - } else { - tk.MustQuery(`select @@last_plan_from_cache`).Check(testkit.Rows("0")) } } } @@ -2979,7 +2977,6 @@ func TestConsistencyBetweenPrepareExecuteAndNormalSql(t *testing.T) { // After beginning a new txn, the infoSchema should be the latest tk1.MustExec("begin pessimistic") tk1.MustQuery("select * from t1").Check(testkit.Rows("1", "2", "3")) - } func verifyCache(ctx context.Context, t *testing.T, tk1 *testkit.TestKit, tk2 *testkit.TestKit, stmtID uint32) { diff --git a/planner/core/rule_aggregation_push_down.go b/planner/core/rule_aggregation_push_down.go index b9763f1b572ef..bf3bbf4e81a1e 100644 --- a/planner/core/rule_aggregation_push_down.go +++ b/planner/core/rule_aggregation_push_down.go @@ -509,8 +509,8 @@ func (a *aggregationPushDownSolver) aggPushDown(p LogicalPlan, opt *logicalOptim p = proj } } - // push aggregation across projection } else if proj, ok1 := child.(*LogicalProjection); ok1 { + // push aggregation across projection // TODO: This optimization is not always reasonable. We have not supported pushing projection to kv layer yet, // so we must do this optimization. noSideEffects := true diff --git a/planner/core/rule_join_reorder_test.go b/planner/core/rule_join_reorder_test.go index 55cfe66320e77..8e3ed32fcbcde 100644 --- a/planner/core/rule_join_reorder_test.go +++ b/planner/core/rule_join_reorder_test.go @@ -33,7 +33,7 @@ func runJoinReorderTestData(t *testing.T, tk *testkit.TestKit, name string) { Warning []string } joinReorderSuiteData := plannercore.GetJoinReorderSuiteData() - joinReorderSuiteData.GetTestCasesByName(name, t, &input, &output) + joinReorderSuiteData.LoadTestCasesByName(name, t, &input, &output) require.Equal(t, len(input), len(output)) for i := range input { testdata.OnRecord(func() { diff --git a/planner/core/rule_partition_processor.go b/planner/core/rule_partition_processor.go index 6404873058b64..b3dd100c11bd5 100644 --- a/planner/core/rule_partition_processor.go +++ b/planner/core/rule_partition_processor.go @@ -1216,7 +1216,6 @@ func replaceColumnWithConst(partFn *expression.ScalarFunction, con *expression.C // No 'copy on write' for the expression here, this is a dangerous operation. args[0] = con return partFn - } // opposite turns > to <, >= to <= and so on. @@ -1421,7 +1420,6 @@ func (s *partitionProcessor) checkHintsApplicable(ds *DataSource, partitionSet s } func (s *partitionProcessor) makeUnionAllChildren(ds *DataSource, pi *model.PartitionInfo, or partitionRangeOR, opt *logicalOptimizeOp) (LogicalPlan, error) { - children := make([]LogicalPlan, 0, len(pi.Definitions)) partitionNameSet := make(set.StringSet) usedDefinition := make(map[int64]model.PartitionDefinition) diff --git a/planner/core/rule_predicate_push_down.go b/planner/core/rule_predicate_push_down.go index d013086b2e9b1..0af409cc52246 100644 --- a/planner/core/rule_predicate_push_down.go +++ b/planner/core/rule_predicate_push_down.go @@ -232,7 +232,6 @@ func (p *LogicalJoin) PredicatePushDown(predicates []expression.Expression, opt leftCond = leftPushCond rightCond = append(p.RightConditions, rightPushCond...) 
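The rule_aggregation_push_down.go hunk above is one of three in this patch (repeatable_read.go and feedback.go below are the others) that move a comment rather than any code: a note placed above `} else if ...` visually attaches to the end of the preceding block, so it is relocated to the top of the branch it actually describes. Schematically, with stand-in names:

package main

func pushThroughJoin()       {}
func pushThroughProjection() {}

func pushDown(isJoin, isProj bool) {
	// Before the patch the note trailed the first block:
	//
	//	pushThroughJoin()
	//	// push aggregation across projection
	//	} else if isProj {
	//
	// After, it opens the branch it actually documents:
	if isJoin {
		pushThroughJoin()
	} else if isProj {
		// push aggregation across projection
		pushThroughProjection()
	}
}

func main() { pushDown(false, true) }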
p.RightConditions = nil - } leftCond = expression.RemoveDupExprs(p.ctx, leftCond) rightCond = expression.RemoveDupExprs(p.ctx, rightCond) @@ -575,7 +574,6 @@ func DeriveOtherConditions( p *LogicalJoin, leftSchema *expression.Schema, rightSchema *expression.Schema, deriveLeft bool, deriveRight bool) ( leftCond []expression.Expression, rightCond []expression.Expression) { - isOuterSemi := (p.JoinType == LeftOuterSemiJoin) || (p.JoinType == AntiLeftOuterSemiJoin) for _, expr := range p.OtherConditions { if deriveLeft { diff --git a/planner/core/rule_result_reorder_test.go b/planner/core/rule_result_reorder_test.go index 567c1488508d9..7c7ec45d76aea 100644 --- a/planner/core/rule_result_reorder_test.go +++ b/planner/core/rule_result_reorder_test.go @@ -106,7 +106,7 @@ func runTestData(t *testing.T, tk *testkit.TestKit, name string) { Plan []string } statsSuiteData := plannercore.GetOrderedResultModeSuiteData() - statsSuiteData.GetTestCasesByName(name, t, &input, &output) + statsSuiteData.LoadTestCasesByName(name, t, &input, &output) require.Equal(t, len(input), len(output)) for i := range input { testdata.OnRecord(func() { diff --git a/planner/core/stats_test.go b/planner/core/stats_test.go index a7fc3d60fa779..db430dd9311d6 100644 --- a/planner/core/stats_test.go +++ b/planner/core/stats_test.go @@ -50,7 +50,7 @@ func TestGroupNDVs(t *testing.T) { JoinInput string } statsSuiteData := core.GetStatsSuiteData() - statsSuiteData.GetTestCases(t, &input, &output) + statsSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { comment := fmt.Sprintf("case:%v sql: %s", i, tt) stmt, err := p.ParseOneStmt(tt, "", "") @@ -142,7 +142,7 @@ func TestNDVGroupCols(t *testing.T) { Plan []string } statsSuiteData := core.GetStatsSuiteData() - statsSuiteData.GetTestCases(t, &input, &output) + statsSuiteData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/planner/core/task.go b/planner/core/task.go index 47d2f2fad5973..67bd81b556d78 100644 --- a/planner/core/task.go +++ b/planner/core/task.go @@ -1603,7 +1603,6 @@ func RemoveUnnecessaryFirstRow( partialGbyItems []expression.Expression, partialSchema *expression.Schema, firstRowFuncMap map[*aggregation.AggFuncDesc]*aggregation.AggFuncDesc) []*aggregation.AggFuncDesc { - partialCursor := 0 newAggFuncs := make([]*aggregation.AggFuncDesc, 0, len(partialAggFuncs)) for _, aggFunc := range partialAggFuncs { diff --git a/planner/core/window_push_down_test.go b/planner/core/window_push_down_test.go index a8ec9c0955d56..5000bf79f312f 100644 --- a/planner/core/window_push_down_test.go +++ b/planner/core/window_push_down_test.go @@ -84,7 +84,7 @@ func TestWindowFunctionDescCanPushDown(t *testing.T) { var input Input var output Output suiteData := plannercore.GetWindowPushDownSuiteData() - suiteData.GetTestCases(t, &input, &output) + suiteData.LoadTestCases(t, &input, &output) testWithData(t, tk, input, output) } @@ -102,7 +102,7 @@ func TestWindowPushDownPlans(t *testing.T) { var input Input var output Output suiteData := plannercore.GetWindowPushDownSuiteData() - suiteData.GetTestCases(t, &input, &output) + suiteData.LoadTestCases(t, &input, &output) testWithData(t, tk, input, output) } @@ -120,7 +120,7 @@ func TestWindowPlanWithOtherOperators(t *testing.T) { var input Input var output Output suiteData := plannercore.GetWindowPushDownSuiteData() - suiteData.GetTestCases(t, &input, &output) + suiteData.LoadTestCases(t, &input, &output) testWithData(t, tk, input, output) } diff --git 
a/planner/funcdep/fd_graph_test.go b/planner/funcdep/fd_graph_test.go index dbe49fb2d7103..d8029dcc77cb6 100644 --- a/planner/funcdep/fd_graph_test.go +++ b/planner/funcdep/fd_graph_test.go @@ -67,7 +67,6 @@ func TestAddStrictFunctionalDependency(t *testing.T) { fd.AddStrictFunctionalDependency(fe1.from, fe1.to) fd.AddStrictFunctionalDependency(fe3.from, fe3.to) assertF() - // TODO: // test reduce col // test more edges diff --git a/planner/implementation/base_test.go b/planner/implementation/base_test.go index b1b73ed0604c3..1fffd91507c39 100644 --- a/planner/implementation/base_test.go +++ b/planner/implementation/base_test.go @@ -23,7 +23,6 @@ import ( ) func TestBaseImplementation(t *testing.T) { - sctx := plannercore.MockContext() p := plannercore.PhysicalLimit{}.Init(sctx, nil, 0, nil) impl := &baseImpl{plan: p} diff --git a/planner/optimize.go b/planner/optimize.go index 16971fb00b638..b4782d4edb8d1 100644 --- a/planner/optimize.go +++ b/planner/optimize.go @@ -34,7 +34,6 @@ import ( "github.com/pingcap/tidb/parser/ast" "github.com/pingcap/tidb/planner/cascades" "github.com/pingcap/tidb/planner/core" - plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/privilege" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" @@ -51,7 +50,7 @@ import ( // IsReadOnly check whether the ast.Node is a read only statement. func IsReadOnly(node ast.Node, vars *variable.SessionVars) bool { if execStmt, isExecStmt := node.(*ast.ExecuteStmt); isExecStmt { - prepareStmt, err := plannercore.GetPreparedStmt(execStmt, vars) + prepareStmt, err := core.GetPreparedStmt(execStmt, vars) if err != nil { logutil.BgLogger().Warn("GetPreparedStmt failed", zap.Error(err)) return false @@ -76,7 +75,7 @@ func matchSQLBinding(sctx sessionctx.Context, stmtNode ast.StmtNode) (bindRecord // Optimize does optimization and creates a Plan. // The node must be prepared first. -func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (plannercore.Plan, types.NameSlice, error) { +func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (core.Plan, types.NameSlice, error) { sessVars := sctx.GetSessionVars() if !sctx.GetSessionVars().InRestrictedSQL && variable.RestrictedReadOnly.Load() || variable.VarTiDBSuperReadOnly.Load() { @@ -114,12 +113,12 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in txnManger := sessiontxn.GetTxnManager(sctx) if _, isolationReadContainTiKV := sessVars.IsolationReadEngines[kv.TiKV]; isolationReadContainTiKV { - var fp plannercore.Plan - if fpv, ok := sctx.Value(plannercore.PointPlanKey).(plannercore.PointPlanVal); ok { + var fp core.Plan + if fpv, ok := sctx.Value(core.PointPlanKey).(core.PointPlanVal); ok { // point plan is already tried in a multi-statement query. 
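The import hunk above explains all of the plannercore-to-core churn through the rest of optimize.go: the file imported planner/core twice, once bare and once under the plannercore alias, a pattern revive reports (its duplicated-imports rule, assuming that is the check enabled in the nogo config). With the alias gone, every former plannercore.X call site must read core.X. In miniature:

package planner

// Before, one package entered the import block twice, which revive reports:
//
//	import (
//		"github.com/pingcap/tidb/planner/core"
//		plannercore "github.com/pingcap/tidb/planner/core"
//	)
//
// After, the alias is gone and every former plannercore.X reference reads
// core.X, which is the rewrite the remainder of this optimize.go diff makes.
import "github.com/pingcap/tidb/planner/core"

var _ = core.OptimizeAstNode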
fp = fpv.Plan } else { - fp = plannercore.TryFastPlan(sctx, node) + fp = core.TryFastPlan(sctx, node) } if fp != nil { return fp, fp.OutputNames(), nil @@ -140,13 +139,13 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in } if ok { // add the extra Limit after matching the bind record - stmtNode = plannercore.TryAddExtraLimit(sctx, stmtNode) + stmtNode = core.TryAddExtraLimit(sctx, stmtNode) node = stmtNode } var ( names types.NameSlice - bestPlan, bestPlanFromBind plannercore.Plan + bestPlan, bestPlanFromBind core.Plan chosenBinding bindinfo.Binding err error ) @@ -216,16 +215,16 @@ func Optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in if sessVars.EvolvePlanBaselines && bestPlanFromBind != nil && sessVars.SelectLimit == math.MaxUint64 { // do not evolve this query if sql_select_limit is enabled // Check bestPlanFromBind firstly to avoid nil stmtNode. - if _, ok := stmtNode.(*ast.SelectStmt); ok && !bindRecord.Bindings[0].Hint.ContainTableHint(plannercore.HintReadFromStorage) { + if _, ok := stmtNode.(*ast.SelectStmt); ok && !bindRecord.Bindings[0].Hint.ContainTableHint(core.HintReadFromStorage) { sessVars.StmtCtx.StmtHints = originStmtHints defPlan, _, _, err := optimize(ctx, sctx, node, is) if err != nil { // Ignore this evolution task. return bestPlan, names, nil } - defPlanHints := plannercore.GenHintsFromPhysicalPlan(defPlan) + defPlanHints := core.GenHintsFromPhysicalPlan(defPlan) for _, hint := range defPlanHints { - if hint.HintName.String() == plannercore.HintReadFromStorage { + if hint.HintName.String() == core.HintReadFromStorage { return bestPlan, names, nil } } @@ -286,14 +285,14 @@ func allowInReadOnlyMode(sctx sessionctx.Context, node ast.Node) (bool, error) { var planBuilderPool = sync.Pool{ New: func() interface{} { - return plannercore.NewPlanBuilder() + return core.NewPlanBuilder() }, } // optimizeCnt is a global variable only used for test. var optimizeCnt int -func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (plannercore.Plan, types.NameSlice, float64, error) { +func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is infoschema.InfoSchema) (core.Plan, types.NameSlice, float64, error) { failpoint.Inject("checkOptimizeCountOne", func() { optimizeCnt++ if optimizeCnt > 1 { @@ -316,7 +315,7 @@ func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in sctx.GetSessionVars().PlanID = rand.Intn(1000) // nolint:gosec }) - builder := planBuilderPool.Get().(*plannercore.PlanBuilder) + builder := planBuilderPool.Get().(*core.PlanBuilder) defer planBuilderPool.Put(builder.ResetForReuse()) builder.Init(sctx, is, hintProcessor) @@ -336,18 +335,18 @@ func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in // we need the table information to check privilege, which is collected // into the visitInfo in the logical plan builder. 
if pm := privilege.GetPrivilegeManager(sctx); pm != nil { - visitInfo := plannercore.VisitInfo4PrivCheck(is, node, builder.GetVisitInfo()) - if err := plannercore.CheckPrivilege(activeRoles, pm, visitInfo); err != nil { + visitInfo := core.VisitInfo4PrivCheck(is, node, builder.GetVisitInfo()) + if err := core.CheckPrivilege(activeRoles, pm, visitInfo); err != nil { return nil, nil, 0, err } } - if err := plannercore.CheckTableLock(sctx, is, builder.GetVisitInfo()); err != nil { + if err := core.CheckTableLock(sctx, is, builder.GetVisitInfo()); err != nil { return nil, nil, 0, err } // Handle the execute statement. - if execPlan, ok := p.(*plannercore.Execute); ok { + if execPlan, ok := p.(*core.Execute); ok { err := execPlan.OptimizePreparedPlan(ctx, sctx, is) return p, p.OutputNames(), 0, err } @@ -355,7 +354,7 @@ func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in names := p.OutputNames() // Handle the non-logical plan statement. - logic, isLogicalPlan := p.(plannercore.LogicalPlan) + logic, isLogicalPlan := p.(core.LogicalPlan) if !isLogicalPlan { return p, names, 0, nil } @@ -367,7 +366,7 @@ func optimize(ctx context.Context, sctx sessionctx.Context, node ast.Node, is in } beginOpt := time.Now() - finalPlan, cost, err := plannercore.DoOptimize(ctx, sctx, builder.GetOptFlag(), logic) + finalPlan, cost, err := core.DoOptimize(ctx, sctx, builder.GetOptFlag(), logic) sctx.GetSessionVars().DurationOptimization = time.Since(beginOpt) return finalPlan, names, cost, err } @@ -387,11 +386,11 @@ func ExtractSelectAndNormalizeDigest(stmtNode ast.StmtNode, specifiledDB string) switch x.Stmt.(type) { case *ast.SelectStmt, *ast.DeleteStmt, *ast.UpdateStmt, *ast.InsertStmt: normalizeSQL := parser.Normalize(utilparser.RestoreWithDefaultDB(x.Stmt, specifiledDB, x.Text())) - normalizeSQL = plannercore.EraseLastSemicolonInSQL(normalizeSQL) + normalizeSQL = core.EraseLastSemicolonInSQL(normalizeSQL) hash := parser.DigestNormalized(normalizeSQL) return x.Stmt, normalizeSQL, hash.String(), nil case *ast.SetOprStmt: - plannercore.EraseLastSemicolon(x) + core.EraseLastSemicolon(x) var normalizeExplainSQL string if specifiledDB != "" { normalizeExplainSQL = parser.Normalize(utilparser.RestoreWithDefaultDB(x, specifiledDB, x.Text())) @@ -408,7 +407,7 @@ func ExtractSelectAndNormalizeDigest(stmtNode ast.StmtNode, specifiledDB string) return x.Stmt, normalizeSQL, hash.String(), nil } case *ast.SelectStmt, *ast.SetOprStmt, *ast.DeleteStmt, *ast.UpdateStmt, *ast.InsertStmt: - plannercore.EraseLastSemicolon(x) + core.EraseLastSemicolon(x) // This function is only used to find bind record. // For some SQLs, such as `explain select * from t`, they will be entered here many times, // but some of them do not want to obtain bind record. 
@@ -520,17 +519,17 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin // Not all session variables are permitted for use with SET_VAR sysVar := variable.GetSysVar(setVarHint.VarName) if sysVar == nil { - warns = append(warns, plannercore.ErrUnresolvedHintName.GenWithStackByArgs(setVarHint.VarName, hint.HintName.String())) + warns = append(warns, core.ErrUnresolvedHintName.GenWithStackByArgs(setVarHint.VarName, hint.HintName.String())) continue } if !sysVar.IsHintUpdatable { - warns = append(warns, plannercore.ErrNotHintUpdatable.GenWithStackByArgs(setVarHint.VarName)) + warns = append(warns, core.ErrNotHintUpdatable.GenWithStackByArgs(setVarHint.VarName)) continue } // If several hints with the same variable name appear in the same statement, the first one is applied and the others are ignored with a warning if _, ok := setVars[setVarHint.VarName]; ok { msg := fmt.Sprintf("%s(%s=%s)", hint.HintName.String(), setVarHint.VarName, setVarHint.Value) - warns = append(warns, plannercore.ErrWarnConflictingHint.GenWithStackByArgs(msg)) + warns = append(warns, core.ErrWarnConflictingHint.GenWithStackByArgs(msg)) continue } setVars[setVarHint.VarName] = setVarHint.Value @@ -639,6 +638,6 @@ func handleStmtHints(hints []*ast.TableOptimizerHint) (stmtHints stmtctx.StmtHin } func init() { - plannercore.OptimizeAstNode = Optimize - plannercore.IsReadOnly = IsReadOnly + core.OptimizeAstNode = Optimize + core.IsReadOnly = IsReadOnly } diff --git a/privilege/privileges/cache.go b/privilege/privileges/cache.go index d18a882cdca2c..7c767cf2d40c7 100644 --- a/privilege/privileges/cache.go +++ b/privilege/privileges/cache.go @@ -303,7 +303,7 @@ func (p *MySQLPrivilege) FindAllRole(activeRoles []*auth.RoleIdentity) []*auth.R } } } - head += 1 + head++ } return ret } @@ -1212,7 +1212,6 @@ func (p *MySQLPrivilege) showGrants(user, host string, roles []*auth.RoleIdentit s = fmt.Sprintf(`GRANT %s ON *.* TO '%s'@'%s' WITH GRANT OPTION`, g, user, host) } else { s = fmt.Sprintf(`GRANT %s ON *.* TO '%s'@'%s'`, g, user, host) - } gs = append(gs, s) } diff --git a/privilege/privileges/privileges_test.go b/privilege/privileges/privileges_test.go index 72c0bc2573ab7..086b270fb8ea1 100644 --- a/privilege/privileges/privileges_test.go +++ b/privilege/privileges/privileges_test.go @@ -979,7 +979,6 @@ func TestCheckCertBasedAuth(t *testing.T) { // test old data and broken data require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "r12_old_tidb_user", Hostname: "localhost"}, nil, nil)) require.False(t, tk.Session().Auth(&auth.UserIdentity{Username: "r13_broken_user", Hostname: "localhost"}, nil, nil)) - } func connectionState(issuer, subject pkix.Name, cipher uint16, opt ...func(c *x509.Certificate)) *tls.ConnectionState { @@ -1524,7 +1523,6 @@ func TestTableNotExistNoPermissions(t *testing.T) { // Check it is permission denied, not not found. 
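The cache.go hunk above, head += 1 becoming head++, is the first of several identical fixes in this section: count++, curCnt++, rowCount++, the bucketDelta increments and decrements, and totalCount++ all follow. They come from revive's increment-decrement rule, which prefers the dedicated statement form when adding or subtracting exactly one; the change is purely cosmetic. A two-line illustration:

package main

import "fmt"

func main() {
	head := 0
	head++ // was: head += 1; same semantics, preferred statement form
	head-- // was: head -= 1
	fmt.Println(head) // prints 0
}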
require.EqualError(t, err2, fmt.Sprintf("[planner:1142]%s command denied to user 'testnotexist'@'localhost' for table 't1'", tt.stmtType)) } - } func TestLoadDataPrivilege(t *testing.T) { @@ -1908,7 +1906,6 @@ func TestSecurityEnhancedModeStatusVars(t *testing.T) { Username: "unostatus", Hostname: "localhost", }, nil, nil) - } func TestSecurityEnhancedLocalBackupRestore(t *testing.T) { @@ -1952,7 +1949,6 @@ func TestSecurityEnhancedLocalBackupRestore(t *testing.T) { _, err = tk.Session().ExecuteInternal(ctx, "RESTORE DATABASE * FROM 'HDFS:///tmp/test';") require.EqualError(t, err, "[planner:8132]Feature 'hdfs storage' is not supported when security enhanced mode is enabled") - } func TestRenameUser(t *testing.T) { @@ -2601,7 +2597,6 @@ func TestPlacementPolicyStmt(t *testing.T) { require.True(t, tk.Session().Auth(&auth.UserIdentity{Username: "placement_user", Hostname: "localhost"}, nil, nil)) tk.MustExec(createStmt) tk.MustExec(dropStmt) - } func TestDBNameCaseSensitivityInTableLevel(t *testing.T) { @@ -2779,7 +2774,7 @@ func TestCreateTmpTablesPriv(t *testing.T) { tk.MustGetErrCode(test.sql, test.errcode) } } - + //nolint:revive,all_revive // TODO: issue #29282 to be fixed. //for i, test := range tests { // preparedStmt := fmt.Sprintf("prepare stmt%d from '%s'", i, test.sql) diff --git a/server/conn.go b/server/conn.go index 70e7fdd540645..b0a6445abf0cf 100644 --- a/server/conn.go +++ b/server/conn.go @@ -782,7 +782,6 @@ func (cc *clientConn) handleAuthPlugin(ctx context.Context, resp *handshakeRespo // authSha implements the caching_sha2_password specific part of the protocol. func (cc *clientConn) authSha(ctx context.Context) ([]byte, error) { - const ( ShaCommand = 1 RequestRsaPubKey = 2 // Not supported yet, only TLS is supported as secure channel. @@ -1873,7 +1872,6 @@ func (cc *clientConn) handleQuery(ctx context.Context, sql string) (err error) { var pointPlans []plannercore.Plan if len(stmts) > 1 { - // The client gets to choose if it allows multi-statements, and // probably defaults OFF. This helps prevent against SQL injection attacks // by early terminating the first statement, and then running an entirely diff --git a/server/conn_stmt.go b/server/conn_stmt.go index c59cba37693d3..375efd1532030 100644 --- a/server/conn_stmt.go +++ b/server/conn_stmt.go @@ -115,7 +115,6 @@ func (cc *clientConn) handleStmtPrepare(ctx context.Context, sql string) error { if err := cc.writeEOF(0); err != nil { return err } - } return cc.flush(ctx) } diff --git a/server/conn_test.go b/server/conn_test.go index ad2959588c840..75146c592017d 100644 --- a/server/conn_test.go +++ b/server/conn_test.go @@ -1260,7 +1260,6 @@ func TestAuthPlugin2(t *testing.T) { require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/server/FakeAuthSwitch")) require.Equal(t, respAuthSwitch, []byte(mysql.AuthNativePassword)) require.NoError(t, err) - } func TestAuthTokenPlugin(t *testing.T) { diff --git a/server/http_handler.go b/server/http_handler.go index 6c15a3c6b3ff9..860e4a9cdd300 100644 --- a/server/http_handler.go +++ b/server/http_handler.go @@ -612,7 +612,6 @@ func (t *tikvHandlerTool) getRegionsMeta(regionIDs []uint64) ([]RegionMeta, erro Peers: region.Meta.Peers, RegionEpoch: region.Meta.RegionEpoch, } - } return regions, nil } @@ -1822,7 +1821,7 @@ func (h mvccTxnHandler) handleMvccGetByKey(params map[string]string, values url. 
} colMap := make(map[int64]*types.FieldType, 3) for _, col := range tb.Meta().Columns { - colMap[col.ID] = &col.FieldType + colMap[col.ID] = &(col.FieldType) } respValue := resp.Value diff --git a/server/server_test.go b/server/server_test.go index 645798c96cfa3..3eb4a2a527708 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -1873,7 +1873,6 @@ func (cli *testServerClient) runTestStatusAPI(t *testing.T) { func (cli *testServerClient) runFailedTestMultiStatements(t *testing.T) { cli.runTestsOnNewDB(t, nil, "FailedMultiStatements", func(dbt *testkit.DBTestKit) { - // Default is now OFF in new installations. // It is still WARN in upgrade installations (for now) _, err := dbt.GetDB().Exec("SELECT 1; SELECT 1; SELECT 2; SELECT 3;") @@ -1934,7 +1933,6 @@ func (cli *testServerClient) runFailedTestMultiStatements(t *testing.T) { } func (cli *testServerClient) runTestMultiStatements(t *testing.T) { - cli.runTestsOnNewDB(t, func(config *mysql.Config) { config.Params["multiStatements"] = "true" }, "MultiStatements", func(dbt *testkit.DBTestKit) { @@ -2167,7 +2165,6 @@ func (cli *testServerClient) waitUntilServerOnline() { } func (cli *testServerClient) runTestInitConnect(t *testing.T) { - cli.runTests(t, nil, func(dbt *testkit.DBTestKit) { dbt.MustExec(`SET GLOBAL init_connect="insert into test.ts VALUES (NOW());SET @a=1;"`) dbt.MustExec(`CREATE USER init_nonsuper`) @@ -2225,7 +2222,6 @@ func (cli *testServerClient) runTestInitConnect(t *testing.T) { // and not internal SQL statements. Thus, this test is in the server-test suite. func (cli *testServerClient) runTestInfoschemaClientErrors(t *testing.T) { cli.runTestsOnNewDB(t, nil, "clientErrors", func(dbt *testkit.DBTestKit) { - clientErrors := []struct { stmt string incrementWarnings bool @@ -2255,7 +2251,6 @@ func (cli *testServerClient) runTestInfoschemaClientErrors(t *testing.T) { for _, test := range clientErrors { for _, tbl := range sources { - var errors, warnings int rows := dbt.MustQuery("SELECT SUM(error_count), SUM(warning_count) FROM information_schema."+tbl+" WHERE error_number = ? GROUP BY error_number", test.errCode) if rows.Next() { @@ -2290,6 +2285,5 @@ func (cli *testServerClient) runTestInfoschemaClientErrors(t *testing.T) { require.Equalf(t, warnings, newWarnings, "source=information_schema.%s code=%d statement=%s", tbl, test.errCode, test.stmt) } } - }) } diff --git a/server/tidb_test.go b/server/tidb_test.go index 47f5a045a20b1..82a997b6dcd4b 100644 --- a/server/tidb_test.go +++ b/server/tidb_test.go @@ -613,7 +613,6 @@ func TestSocketAndIp(t *testing.T) { cli.checkRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'localhost'\nGRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO 'user1'@'localhost'") require.NoError(t, rows.Close()) }) - } // TestOnlySocket for server configuration without network interface for mysql clients @@ -769,7 +768,6 @@ func TestOnlySocket(t *testing.T) { cli.checkRows(t, rows, "GRANT USAGE ON *.* TO 'user1'@'localhost'\nGRANT SELECT,INSERT,UPDATE,DELETE ON test.* TO 'user1'@'localhost'") require.NoError(t, rows.Close()) }) - } // generateCert generates a private key and a certificate in PEM format based on parameters. 
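The change from &col.FieldType to &(col.FieldType) here, and again in schema_amender.go below, alters nothing semantically; the parentheses seem intended only to defeat a linter's syntactic match on taking the address of a range variable's field (checks in the vein of gosec's G601 look for the bare form). In both loops the warning would be a false positive anyway, since the column slices hold *model.ColumnInfo, so col is a shared pointer rather than a per-iteration copy. When the elements really are values, the robust fix is to index the slice, as this hypothetical sketch shows:

package main

import "fmt"

type FieldType struct{ Tp byte }
type ColumnInfo struct{ FieldType FieldType }

func main() {
	cols := []ColumnInfo{{FieldType{1}}, {FieldType{2}}}
	ptrs := make([]*FieldType, 0, len(cols))
	for i := range cols {
		// Indexing yields a stable address into the slice's backing array.
		// With a value-typed range variable, &col.FieldType would instead
		// alias the single loop variable (under pre-Go-1.22 semantics).
		ptrs = append(ptrs, &cols[i].FieldType)
	}
	fmt.Println(ptrs[0].Tp, ptrs[1].Tp) // prints 1 2
}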
@@ -1185,7 +1183,6 @@ func TestNullFlag(t *testing.T) { } { - rs, err := Execute(ctx, qctx, "select if(1, null, 1) ;") require.NoError(t, err) cols := rs.Columns() @@ -2119,7 +2116,6 @@ func setupForTestTopSQLStatementStats(t *testing.T) (*tidbTestSuite, stmtstats.S err = failpoint.Disable("github.com/pingcap/tidb/store/mockstore/unistore/unistoreRPCClientSendHook") require.NoError(t, err) stmtstats.CloseAggregator() - } return ts, total, tagChecker, collectedNotifyCh, cleanFn } diff --git a/session/clustered_index_test.go b/session/clustered_index_test.go index e04052c1b6c79..4a3f6a66ac6ad 100644 --- a/session/clustered_index_test.go +++ b/session/clustered_index_test.go @@ -85,7 +85,7 @@ func TestClusteredPrefixColumn(t *testing.T) { Res []string } testData := session.GetClusteredIndexSuiteData() - testData.GetTestCases(t, &input, &output) + testData.LoadTestCases(t, &input, &output) for i, tt := range input { testdata.OnRecord(func() { output[i].SQL = tt diff --git a/session/nontransactional.go b/session/nontransactional.go index 5c7ed5303a78f..ef8adb541203e 100644 --- a/session/nontransactional.go +++ b/session/nontransactional.go @@ -164,7 +164,6 @@ func checkConstraint(stmt *ast.NonTransactionalDeleteStmt, se Session) error { // single-threaded worker. work on the key range [start, end] func splitDeleteWorker(ctx context.Context, jobs []job, stmt *ast.NonTransactionalDeleteStmt, tableName *ast.TableName, se Session, originalCondition ast.ExprNode) ([]string, error) { - // prepare for the construction of statement var shardColumnRefer *ast.ResultField var shardColumnType types.FieldType diff --git a/session/schema_amender.go b/session/schema_amender.go index 13f65571d05bf..b2ad3a4c44cda 100644 --- a/session/schema_amender.go +++ b/session/schema_amender.go @@ -248,7 +248,7 @@ func (a *amendCollector) collectIndexAmendOps(sctx sessionctx.Context, tblAtStar opInfo.schemaAndDecoder = newSchemaAndDecoder(sctx, tblAtStart.Meta()) fieldTypes := make([]*types.FieldType, 0, len(tblAtStart.Meta().Columns)) for _, col := range tblAtStart.Meta().Columns { - fieldTypes = append(fieldTypes, &col.FieldType) + fieldTypes = append(fieldTypes, &(col.FieldType)) } opInfo.chk = chunk.NewChunkWithCapacity(fieldTypes, 4) addNewIndexOp := &amendOperationAddIndex{ diff --git a/session/session_test/session_test.go b/session/session_test/session_test.go index ba6ebf112c6e8..bea9991b59046 100644 --- a/session/session_test/session_test.go +++ b/session/session_test/session_test.go @@ -3654,7 +3654,7 @@ func TestAutocommit(t *testing.T) { tk1 := testkit.NewTestKit(t, store) tk1.MustExec("use test") tk1.MustExec("insert into t select 1") - + //nolint:all_revive,revive tk.MustQuery("select * from t").Check(testkit.Rows("1")) tk.MustExec("delete from t") diff --git a/sessionctx/variable/mock_globalaccessor_test.go b/sessionctx/variable/mock_globalaccessor_test.go index 810878aed3b7e..073e6f73b4836 100644 --- a/sessionctx/variable/mock_globalaccessor_test.go +++ b/sessionctx/variable/mock_globalaccessor_test.go @@ -45,5 +45,4 @@ func TestMockAPI(t *testing.T) { require.NoError(t, err) err = mock.SetGlobalSysVarOnly(DefaultAuthPlugin, "mysql_native_password") require.NoError(t, err) - } diff --git a/sessionctx/variable/sysvar.go b/sessionctx/variable/sysvar.go index c66154bc803d4..8acfa65d37138 100644 --- a/sessionctx/variable/sysvar.go +++ b/sessionctx/variable/sysvar.go @@ -1389,8 +1389,7 @@ var defaultSysVars = []*SysVar{ }, SetSession: func(s *SessionVars, val string) error { s.AllowFallbackToTiKV = 
make(map[kv.StoreType]struct{}) for _, engine := range strings.Split(val, ",") { - switch engine { - case kv.TiFlash.Name(): + if engine == kv.TiFlash.Name() { s.AllowFallbackToTiKV[kv.TiFlash] = struct{}{} } } @@ -1409,11 +1408,9 @@ var defaultSysVars = []*SysVar{ return nil }}, {Scope: ScopeGlobal | ScopeSession, Name: TiDBEnableNoopFuncs, Value: DefTiDBEnableNoopFuncs, Type: TypeEnum, PossibleValues: []string{Off, On, Warn}, Validation: func(vars *SessionVars, normalizedValue string, originalValue string, scope ScopeFlag) (string, error) { - // The behavior is very weird if someone can turn TiDBEnableNoopFuncs OFF, but keep any of the following on: // TxReadOnly, TransactionReadOnly, OfflineMode, SuperReadOnly, serverReadOnly, SQLAutoIsNull // To prevent this strange position, prevent setting to OFF when any of these sysVars are ON of the same scope. - if normalizedValue == Off { for _, potentialIncompatibleSysVar := range []string{TxReadOnly, TransactionReadOnly, OfflineMode, SuperReadOnly, ReadOnly, SQLAutoIsNull} { val, _ := vars.GetSystemVar(potentialIncompatibleSysVar) // session scope diff --git a/sessionctx/variable/sysvar_test.go b/sessionctx/variable/sysvar_test.go index ebf0fd2587624..d397fd04932f9 100644 --- a/sessionctx/variable/sysvar_test.go +++ b/sessionctx/variable/sysvar_test.go @@ -394,7 +394,6 @@ func TestTimeZone(t *testing.T) { tz, err := parseTimeZone("UTC") require.NoError(t, err) require.Equal(t, tz, vars.TimeZone) - } func TestForeignKeyChecks(t *testing.T) { @@ -407,7 +406,6 @@ func TestForeignKeyChecks(t *testing.T) { warn := vars.StmtCtx.GetWarnings()[0].Err require.Equal(t, "[variable:8047]variable 'foreign_key_checks' does not yet support value: on", warn.Error()) - } func TestTxnIsolation(t *testing.T) { diff --git a/sessionctx/variable/variable.go b/sessionctx/variable/variable.go index 0266255c1e857..c62c5e02fadac 100644 --- a/sessionctx/variable/variable.go +++ b/sessionctx/variable/variable.go @@ -519,8 +519,7 @@ func (sv *SysVar) SkipInit() bool { } // These a special "Global-only" sysvars that for backward compatibility // are currently cached in the session. Please don't add to this list. 
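The AllowFallbackToTiKV hunk above and the SkipInit hunk that continues below both collapse a switch carrying a single case and no default into a plain if, a rewrite that preserves behavior while reading more directly as an equality test (gocritic calls the pattern singleCaseSwitch; whichever analyzer in the nogo set flagged it, the transformation is mechanical). In miniature:

package main

import "fmt"

func storageClass(engine string) string {
	// Before:
	//	switch engine {
	//	case "tiflash":
	//		return "columnar"
	//	}
	// After: one arm and no default, so an if states it directly.
	if engine == "tiflash" {
		return "columnar"
	}
	return "row-based"
}

func main() { fmt.Println(storageClass("tiflash")) }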
- switch sv.Name { - case TiDBRowFormatVersion: + if sv.Name == TiDBRowFormatVersion { return false } return !sv.HasSessionScope() diff --git a/sessionctx/variable/varsutil_test.go b/sessionctx/variable/varsutil_test.go index 15d16a5ca4295..e7cac6d952841 100644 --- a/sessionctx/variable/varsutil_test.go +++ b/sessionctx/variable/varsutil_test.go @@ -581,7 +581,6 @@ func TestValidate(t *testing.T) { } }) } - } func TestValidateStmtSummary(t *testing.T) { @@ -664,7 +663,6 @@ func TestConcurrencyVariables(t *testing.T) { require.Equal(t, wdConcurrency, vars.WindowConcurrency()) require.Equal(t, mjConcurrency, vars.MergeJoinConcurrency()) require.Equal(t, saConcurrency, vars.StreamAggConcurrency()) - } func TestHelperFuncs(t *testing.T) { diff --git a/sessiontxn/failpoint.go b/sessiontxn/failpoint.go index da63ac753870f..35b71d075a051 100644 --- a/sessiontxn/failpoint.go +++ b/sessiontxn/failpoint.go @@ -122,7 +122,7 @@ func TsoRequestCountInc(sctx sessionctx.Context) { if !ok { count = 0 } - count += 1 + count++ sctx.SetValue(TsoRequestCount, count) } diff --git a/sessiontxn/isolation/repeatable_read.go b/sessiontxn/isolation/repeatable_read.go index a70f882758951..538a66f398b4e 100644 --- a/sessiontxn/isolation/repeatable_read.go +++ b/sessiontxn/isolation/repeatable_read.go @@ -241,8 +241,12 @@ func (p *PessimisticRRTxnContextProvider) handleAfterPessimisticLockError(lockEr zap.Uint64("lockTS", deadlock.LockTs), zap.Stringer("lockKey", kv.Key(deadlock.LockKey)), zap.Uint64("deadlockKeyHash", deadlock.DeadlockKeyHash)) - } else if terror.ErrorEqual(kv.ErrWriteConflict, lockErr) { + // Always update forUpdateTS by getting a new timestamp from PD. + // If we use the conflict commitTS as the new forUpdateTS and async commit + // is used, the commitTS of this transaction may exceed the max timestamp + // that PD allocates. Then, the change may be invisible to a new transaction, + // which means linearizability is broken. errStr := lockErr.Error() forUpdateTS := txnCtx.GetForUpdateTS() @@ -250,11 +254,6 @@ func (p *PessimisticRRTxnContextProvider) handleAfterPessimisticLockError(lockEr zap.Uint64("txn", txnCtx.StartTS), zap.Uint64("forUpdateTS", forUpdateTS), zap.String("err", errStr)) - // Always update forUpdateTS by getting a new timestamp from PD. - // If we use the conflict commitTS as the new forUpdateTS and async commit - // is used, the commitTS of this transaction may exceed the max timestamp - // that PD allocates. Then, the change may be invisible to a new transaction, - // which means linearizability is broken. } else { // This branch: if err is not nil, always update forUpdateTS to avoid problem described below. 
// For nowait, when ErrLock happened, ErrLockAcquireFailAndNoWaitSet will be returned, and in the same txn diff --git a/sessiontxn/txn_context_test.go b/sessiontxn/txn_context_test.go index e47211ab34fa1..fee1ddcc4b51d 100644 --- a/sessiontxn/txn_context_test.go +++ b/sessiontxn/txn_context_test.go @@ -940,7 +940,6 @@ func TestTSOCmdCountForPrepareExecute(t *testing.T) { } count := sctx.Value(sessiontxn.TsoRequestCount) require.Equal(t, uint64(99), count) - } func TestTSOCmdCountForTextSql(t *testing.T) { diff --git a/statistics/builder.go b/statistics/builder.go index 1cc821de0b962..f8ada420edc0a 100644 --- a/statistics/builder.go +++ b/statistics/builder.go @@ -308,7 +308,7 @@ func BuildHistAndTopN( } // case 1, this value is equal to the last one: current count++ if bytes.Equal(cur, sampleBytes) { - curCnt += 1 + curCnt++ continue } // case 2, meet a different value: counting for the "current" is complete diff --git a/statistics/column.go b/statistics/column.go index 6b897d5e5655b..2976329faa277 100644 --- a/statistics/column.go +++ b/statistics/column.go @@ -221,7 +221,7 @@ func (c *Column) GetColumnRowCount(sctx sessionctx.Context, ranges []*ranger.Ran if !rg.LowExclude && !rg.HighExclude { // In this case, the row count is at most 1. if pkIsHandle { - rowCount += 1 + rowCount++ continue } var cnt float64 diff --git a/statistics/feedback.go b/statistics/feedback.go index daf771d0f89a4..064b3d676a9e4 100644 --- a/statistics/feedback.go +++ b/statistics/feedback.go @@ -799,8 +799,8 @@ func UpdateHistogramWithBucketCount(h *Histogram, feedback *QueryFeedback, stats // Update the NDV of primary key column. if feedback.Tp == PkType { hist.NDV = int64(hist.TotalRowCount()) - // If we maintained the NDV of bucket. We can also update the total ndv. } else if feedback.Tp == IndexType && statsVer == 2 { + // If we maintained the NDV of bucket. We can also update the total ndv. 
 		totNdv := int64(0)
 		for _, bkt := range buckets {
 			totNdv += bkt.Ndv
diff --git a/statistics/handle/handle.go b/statistics/handle/handle.go
index 2f4d313446f24..e6e9a1cfb6799 100644
--- a/statistics/handle/handle.go
+++ b/statistics/handle/handle.go
@@ -274,11 +274,11 @@ func (c *statsHealthyChange) update(add bool, statsHealthy int64) {
 	}
 	lastIDX := len(c.bucketDelta) - 1
 	if add {
-		c.bucketDelta[idx] += 1
-		c.bucketDelta[lastIDX] += 1
+		c.bucketDelta[idx]++
+		c.bucketDelta[lastIDX]++
 	} else {
-		c.bucketDelta[idx] -= 1
-		c.bucketDelta[lastIDX] -= 1
+		c.bucketDelta[idx]--
+		c.bucketDelta[lastIDX]--
 	}
 }
 
diff --git a/statistics/handle/handle_hist.go b/statistics/handle/handle_hist.go
index a8f388d08ec69..44423794fbbfa 100644
--- a/statistics/handle/handle_hist.go
+++ b/statistics/handle/handle_hist.go
@@ -233,7 +233,7 @@ func (h *Handle) handleOneItemTask(task *NeededItemTask, readerCtx *StatsReaderC
 		return nil, nil
 	}
 	// refresh statsReader to get latest stats
-	h.getFreshStatsReader(readerCtx, ctx)
+	h.loadFreshStatsReader(readerCtx, ctx)
 	t := time.Now()
 	needUpdate := false
 	wrapper, err = h.readStatsForOneItem(item, wrapper, readerCtx.reader)
@@ -257,7 +257,7 @@ func (h *Handle) handleOneItemTask(task *NeededItemTask, readerCtx *StatsReaderC
 	return nil, nil
 }
 
-func (h *Handle) getFreshStatsReader(readerCtx *StatsReaderContext, ctx sqlexec.RestrictedSQLExecutor) {
+func (h *Handle) loadFreshStatsReader(readerCtx *StatsReaderContext, ctx sqlexec.RestrictedSQLExecutor) {
 	if readerCtx.reader == nil || readerCtx.createdTime.Add(h.Lease()).Before(time.Now()) {
 		if readerCtx.reader != nil {
 			err := h.releaseStatsReader(readerCtx.reader, ctx)
diff --git a/statistics/handle/handle_test.go b/statistics/handle/handle_test.go
index a461a23b751d6..884eada2107f6 100644
--- a/statistics/handle/handle_test.go
+++ b/statistics/handle/handle_test.go
@@ -3234,73 +3234,75 @@ func TestColumnCountFromStorage(t *testing.T) {
 
 func TestIncrementalModifyCountUpdate(t *testing.T) {
 	for _, analyzeSnapshot := range []bool{true, false} {
-		store, dom, clean := testkit.CreateMockStoreAndDomain(t)
-		defer clean()
-		tk := testkit.NewTestKit(t, store)
-		tk.MustExec("use test")
-		if analyzeSnapshot {
-			tk.MustExec("set @@session.tidb_enable_analyze_snapshot = on")
-		} else {
-			tk.MustExec("set @@session.tidb_enable_analyze_snapshot = off")
-		}
-		tk.MustExec("create table t(a int)")
-		tk.MustExec("set @@session.tidb_analyze_version = 2")
-		h := dom.StatsHandle()
-		err := h.HandleDDLEvent(<-h.DDLEventCh())
-		require.NoError(t, err)
-		tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
-		require.NoError(t, err)
-		tblInfo := tbl.Meta()
-		tid := tblInfo.ID
+		func(analyzeSnapshot bool) {
+			store, dom, clean := testkit.CreateMockStoreAndDomain(t)
+			defer clean()
+			tk := testkit.NewTestKit(t, store)
+			tk.MustExec("use test")
+			if analyzeSnapshot {
+				tk.MustExec("set @@session.tidb_enable_analyze_snapshot = on")
+			} else {
+				tk.MustExec("set @@session.tidb_enable_analyze_snapshot = off")
+			}
+			tk.MustExec("create table t(a int)")
+			tk.MustExec("set @@session.tidb_analyze_version = 2")
+			h := dom.StatsHandle()
+			err := h.HandleDDLEvent(<-h.DDLEventCh())
+			require.NoError(t, err)
+			tbl, err := dom.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t"))
+			require.NoError(t, err)
+			tblInfo := tbl.Meta()
+			tid := tblInfo.ID
 
-		tk.MustExec("insert into t values(1),(2),(3)")
-		require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll))
-		err = h.Update(dom.InfoSchema())
-		require.NoError(t, err)
-		tk.MustExec("analyze table t")
-		tk.MustQuery(fmt.Sprintf("select count, modify_count from mysql.stats_meta where table_id = %d", tid)).Check(testkit.Rows(
-			"3 0",
-		))
+			tk.MustExec("insert into t values(1),(2),(3)")
+			require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll))
+			err = h.Update(dom.InfoSchema())
+			require.NoError(t, err)
+			tk.MustExec("analyze table t")
+			tk.MustQuery(fmt.Sprintf("select count, modify_count from mysql.stats_meta where table_id = %d", tid)).Check(testkit.Rows(
+				"3 0",
+			))
 
-		tk.MustExec("begin")
-		txn, err := tk.Session().Txn(false)
-		require.NoError(t, err)
-		startTS := txn.StartTS()
-		tk.MustExec("commit")
+			tk.MustExec("begin")
+			txn, err := tk.Session().Txn(false)
+			require.NoError(t, err)
+			startTS := txn.StartTS()
+			tk.MustExec("commit")
 
-		tk.MustExec("insert into t values(4),(5),(6)")
-		require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll))
-		err = h.Update(dom.InfoSchema())
-		require.NoError(t, err)
+			tk.MustExec("insert into t values(4),(5),(6)")
+			require.NoError(t, h.DumpStatsDeltaToKV(handle.DumpAll))
+			err = h.Update(dom.InfoSchema())
+			require.NoError(t, err)
 
-		// Simulate that the analyze would start before and finish after the second insert.
-		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS)))
-		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectBaseCount", "return(3)"))
-		require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectBaseModifyCount", "return(0)"))
-		tk.MustExec("analyze table t")
-		if analyzeSnapshot {
-			// Check the count / modify_count changes during the analyze are not lost.
-			tk.MustQuery(fmt.Sprintf("select count, modify_count from mysql.stats_meta where table_id = %d", tid)).Check(testkit.Rows(
-				"6 3",
-			))
-			// Check the histogram is correct for the snapshot analyze.
-			tk.MustQuery(fmt.Sprintf("select distinct_count from mysql.stats_histograms where table_id = %d", tid)).Check(testkit.Rows(
-				"3",
-			))
-		} else {
-			// Since analyze use max ts to read data, it finds the row count is 6 and directly set count to 6 rather than incrementally update it.
-			// But it still incrementally updates modify_count.
-			tk.MustQuery(fmt.Sprintf("select count, modify_count from mysql.stats_meta where table_id = %d", tid)).Check(testkit.Rows(
-				"6 3",
-			))
-			// Check the histogram is collected from the latest data rather than the snapshot at startTS.
-			tk.MustQuery(fmt.Sprintf("select distinct_count from mysql.stats_histograms where table_id = %d", tid)).Check(testkit.Rows(
-				"6",
-			))
-		}
-		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot"))
-		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectBaseCount"))
-		require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectBaseModifyCount"))
+			// Simulate that the analyze would start before and finish after the second insert.
+			require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot", fmt.Sprintf("return(%d)", startTS)))
+			require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectBaseCount", "return(3)"))
+			require.NoError(t, failpoint.Enable("github.com/pingcap/tidb/executor/injectBaseModifyCount", "return(0)"))
+			tk.MustExec("analyze table t")
+			if analyzeSnapshot {
+				// Check the count / modify_count changes during the analyze are not lost.
+				tk.MustQuery(fmt.Sprintf("select count, modify_count from mysql.stats_meta where table_id = %d", tid)).Check(testkit.Rows(
+					"6 3",
+				))
+				// Check the histogram is correct for the snapshot analyze.
+				tk.MustQuery(fmt.Sprintf("select distinct_count from mysql.stats_histograms where table_id = %d", tid)).Check(testkit.Rows(
+					"3",
+				))
+			} else {
+				// Since analyze uses max ts to read data, it finds that the row count is 6 and directly sets count to 6 rather than incrementally updating it.
+				// But it still incrementally updates modify_count.
+				tk.MustQuery(fmt.Sprintf("select count, modify_count from mysql.stats_meta where table_id = %d", tid)).Check(testkit.Rows(
+					"6 3",
+				))
+				// Check the histogram is collected from the latest data rather than the snapshot at startTS.
+				tk.MustQuery(fmt.Sprintf("select distinct_count from mysql.stats_histograms where table_id = %d", tid)).Check(testkit.Rows(
+					"6",
+				))
+			}
+			require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectAnalyzeSnapshot"))
+			require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectBaseCount"))
+			require.NoError(t, failpoint.Disable("github.com/pingcap/tidb/executor/injectBaseModifyCount"))
+		}(analyzeSnapshot)
 	}
 }
diff --git a/statistics/index.go b/statistics/index.go
index fcb8b5439d04a..6b8a88501c30e 100644
--- a/statistics/index.go
+++ b/statistics/index.go
@@ -239,7 +239,7 @@ func (idx *Index) GetRowCount(sctx sessionctx.Context, coll *HistColl, indexRang
 		if fullLen {
 			// At most 1 in this case.
 			if idx.Info.Unique {
-				totalCount += 1
+				totalCount++
 				continue
 			}
 			count := idx.equalRowCount(lb, realtimeRowCount)
diff --git a/statistics/integration_test.go b/statistics/integration_test.go
index a0656f69a9cfc..2a37f27fc146b 100644
--- a/statistics/integration_test.go
+++ b/statistics/integration_test.go
@@ -281,7 +281,7 @@ func TestExpBackoffEstimation(t *testing.T) {
 		output [][]string
 	)
 	integrationSuiteData := statistics.GetIntegrationSuiteData()
-	integrationSuiteData.GetTestCases(t, &input, &output)
+	integrationSuiteData.LoadTestCases(t, &input, &output)
 	inputLen := len(input)
 	// The test cases are:
 	// Query a = 1, b = 1, c = 1, d >= 3 and d <= 5 separately. We got 5, 3, 2, 3.
@@ -424,7 +424,7 @@ func TestNULLOnFullSampling(t *testing.T) {
 		require.Equal(t, int64(3), col.NullCount)
 	}
 	integrationSuiteData := statistics.GetIntegrationSuiteData()
-	integrationSuiteData.GetTestCases(t, &input, &output)
+	integrationSuiteData.LoadTestCases(t, &input, &output)
 	// Check the topn and buckets contains no null values.
 	for i := 0; i < len(input); i++ {
 		testdata.OnRecord(func() {
diff --git a/statistics/selectivity_test.go b/statistics/selectivity_test.go
index 3d773eb0b51c7..02ed35c6f031c 100644
--- a/statistics/selectivity_test.go
+++ b/statistics/selectivity_test.go
@@ -65,7 +65,7 @@ func TestCollationColumnEstimate(t *testing.T) {
 		output [][]string
 	)
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i := 0; i < len(input); i++ {
 		testdata.OnRecord(func() {
 			output[i] = testdata.ConvertRowsToStrings(tk.MustQuery(input[i]).Rows())
@@ -147,7 +147,7 @@ func TestOutOfRangeEstimation(t *testing.T) {
 		Count float64
 	}
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	increasedTblRowCount := int64(float64(statsTbl.Count) * 1.5)
 	for i, ran := range input {
 		count, err = col.GetColumnRowCount(sctx, getRange(ran.Start, ran.End), increasedTblRowCount, false)
@@ -283,7 +283,7 @@ func TestPrimaryKeySelectivity(t *testing.T) {
 	testKit.MustExec("create table t(a char(10) primary key, b int)")
 	var input, output [][]string
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i, ts := range input {
 		for j, tt := range ts {
 			if j != len(ts)-1 {
@@ -360,7 +360,7 @@ func TestStatsVer2(t *testing.T) {
 		output [][]string
 	)
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i := range input {
 		testdata.OnRecord(func() {
 			output[i] = testdata.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
@@ -398,7 +398,7 @@ func TestTopNOutOfHist(t *testing.T) {
 		output [][]string
 	)
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i := range input {
 		testdata.OnRecord(func() {
 			output[i] = testdata.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
@@ -423,7 +423,7 @@ func TestColumnIndexNullEstimation(t *testing.T) {
 		output [][]string
 	)
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i := 0; i < 5; i++ {
 		testdata.OnRecord(func() {
 			output[i] = testdata.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
@@ -458,7 +458,7 @@ func TestUniqCompEqualEst(t *testing.T) {
 		output [][]string
 	)
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i := 0; i < 1; i++ {
 		testdata.OnRecord(func() {
 			output[i] = testdata.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows())
@@ -581,7 +581,7 @@ func TestDiscreteDistribution(t *testing.T) {
 	)
 
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 
 	for i, tt := range input {
 		testdata.OnRecord(func() {
@@ -606,7 +606,7 @@ func TestSelectCombinedLowBound(t *testing.T) {
 	)
 
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 
 	for i, tt := range input {
 		testdata.OnRecord(func() {
@@ -645,7 +645,7 @@ func TestDNFCondSelectivity(t *testing.T) {
 		}
 	)
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		sctx := testKit.Session().(sessionctx.Context)
 		stmts, err := session.Parse(sctx, tt)
@@ -762,7 +762,7 @@ func TestSmallRangeEstimation(t *testing.T) {
 		Count float64
 	}
 	statsSuiteData := statistics.GetStatsSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	for i, ran := range input {
 		count, err := col.GetColumnRowCount(sctx, getRange(ran.Start, ran.End), statsTbl.Count, false)
 		require.NoError(t, err)
@@ -913,7 +913,7 @@ func TestDefaultSelectivityForStrMatch(t *testing.T) {
 	)
 
 	statsSuiteData := statistics.GetIntegrationSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 
 	matchExplain, err := regexp.Compile("^explain")
 	require.NoError(t, err)
@@ -942,7 +942,7 @@ func TestTopNAssistedEstimationWithoutNewCollation(t *testing.T) {
 		output []outputType
 	)
 	statsSuiteData := statistics.GetIntegrationSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	testTopNAssistedEstimationInner(t, input, output, store, dom)
 }
 
@@ -955,7 +955,7 @@ func TestTopNAssistedEstimationWithNewCollation(t *testing.T) {
 		output []outputType
 	)
 	statsSuiteData := statistics.GetIntegrationSuiteData()
-	statsSuiteData.GetTestCases(t, &input, &output)
+	statsSuiteData.LoadTestCases(t, &input, &output)
 	testTopNAssistedEstimationInner(t, input, output, store, dom)
 }
diff --git a/statistics/trace_test.go b/statistics/trace_test.go
index 0d27b09811e5b..f9dc92d00ab1f 100644
--- a/statistics/trace_test.go
+++ b/statistics/trace_test.go
@@ -52,7 +52,7 @@ func TestTraceCE(t *testing.T) {
 		}
 	)
 	traceSuiteData := statistics.GetTraceSuiteData()
-	traceSuiteData.GetTestCases(t, &in, &out)
+	traceSuiteData.LoadTestCases(t, &in, &out)
 
 	// Load needed statistics.
 	for _, tt := range in {
diff --git a/store/copr/batch_coprocessor.go b/store/copr/batch_coprocessor.go
index 859011771106c..7cf36d958ab59 100644
--- a/store/copr/batch_coprocessor.go
+++ b/store/copr/batch_coprocessor.go
@@ -406,7 +406,7 @@ func balanceBatchCopTask(ctx context.Context, kvStore *kvStore, originalTasks []
 			// if more than one store is valid, put the region
 			// to store candidate map
 			totalRegionCandidateNum += validStoreNum
-			totalRemainingRegionNum += 1
+			totalRemainingRegionNum++
 			candidateRegionInfos = append(candidateRegionInfos, ri)
 			taskKey := ri.Region.String()
 			for _, storeID := range ri.AllStores {
diff --git a/store/copr/batch_coprocessor_test.go b/store/copr/batch_coprocessor_test.go
index 95c619e3c1b0e..aafa19071a392 100644
--- a/store/copr/batch_coprocessor_test.go
+++ b/store/copr/batch_coprocessor_test.go
@@ -129,7 +129,6 @@ func TestBalanceBatchCopTaskWithEmptyTaskSet(t *testing.T) {
 		require.True(t, emptyResult != nil)
 		require.True(t, len(emptyResult) == 0)
 	}
-
 }
 
 func TestDeepCopyStoreTaskMap(t *testing.T) {
diff --git a/store/copr/coprocessor_cache.go b/store/copr/coprocessor_cache.go
index 2ee1ccffe2547..baa8cec328ca3 100644
--- a/store/copr/coprocessor_cache.go
+++ b/store/copr/coprocessor_cache.go
@@ -113,7 +113,7 @@ func coprCacheBuildKey(copReq *coprocessor.Request) ([]byte, error) {
 		totalLength += 2 + len(r.Start) + 2 + len(r.End)
 	}
 	if copReq.PagingSize > 0 {
-		totalLength += 1
+		totalLength++
 	}
 
 	key := make([]byte, totalLength)
diff --git a/store/driver/txn/error.go b/store/driver/txn/error.go
index 86c8f0262a322..3f3a769180945 100644
--- a/store/driver/txn/error.go
+++ b/store/driver/txn/error.go
@@ -72,7 +72,7 @@ func extractKeyExistsErrFromHandle(key kv.Key, value []byte, tblInfo *model.Tabl
 	cols := make(map[int64]*types.FieldType, len(tblInfo.Columns))
 	for _, col := range tblInfo.Columns {
-		cols[col.ID] = &col.FieldType
+		cols[col.ID] = &(col.FieldType)
 	}
 
 	handleColIDs := make([]int64, 0, len(idxInfo.Columns))
 	for _, col := range idxInfo.Columns {
diff --git a/store/gcworker/gc_worker.go b/store/gcworker/gc_worker.go
index a92858befc8fc..92c3b535ba5d3 100644
--- a/store/gcworker/gc_worker.go
+++ b/store/gcworker/gc_worker.go
@@ -1350,10 +1350,10 @@ func (w *GCWorker) resolveLocksPhysical(ctx context.Context, safePoint uint64) e
 				delete(stores, store)
 			}
 			// The store is checked and has been resolved before.
+			// If the store is checked but not resolved, we can retry resolving it, so leave it in dirtyStores.
 			if _, ok := dirtyStores[store]; !ok {
 				delete(stores, store)
 			}
-			// If the store is checked and not resolved, we can retry to resolve it again, so leave it in dirtyStores.
 		} else if _, ok := registeredStores[store]; ok {
 			// The store has been registered and it's dirty due to too many collected locks. Fall back to legacy mode.
 			// We can't remove the lock observer from the store and retry the whole procedure because if the store
diff --git a/store/gcworker/gc_worker_test.go b/store/gcworker/gc_worker_test.go
index c453545a4362b..4460bcce4048c 100644
--- a/store/gcworker/gc_worker_test.go
+++ b/store/gcworker/gc_worker_test.go
@@ -1448,7 +1448,7 @@ func TestMergeLockScanner(t *testing.T) {
 	for _, useMock := range []bool{false, true} {
 		channel := makeMergedChannel
-		if useMock == true {
+		if useMock {
 			channel = s.makeMergedMockClient
 		}
 
diff --git a/store/helper/helper_test.go b/store/helper/helper_test.go
index acd9bac0e5290..4174735058ac4 100644
--- a/store/helper/helper_test.go
+++ b/store/helper/helper_test.go
@@ -202,7 +202,6 @@ func mockHotRegionResponse(w http.ResponseWriter, _ *http.Request) {
 	if err != nil {
 		log.Panic("write http response failed", zap.Error(err))
 	}
-
 }
 
 func getMockRegionsTableInfoSchema() []*model.DBInfo {
diff --git a/store/mockstore/unistore/cophandler/cop_handler_test.go b/store/mockstore/unistore/cophandler/cop_handler_test.go
index 127b233b98a82..0dc25790e3a44 100644
--- a/store/mockstore/unistore/cophandler/cop_handler_test.go
+++ b/store/mockstore/unistore/cophandler/cop_handler_test.go
@@ -553,7 +553,6 @@ func createTestDB(dbPath, LogPath string) (*badger.DB, error) {
 }
 
 func BenchmarkExecutors(b *testing.B) {
-
 	prepare := func(rows, limit int) (dagReq *tipb.DAGRequest, dagCtx *dagContext, clean func() error) {
 		data, err := prepareTestTableData(rows, tableID)
 		if err != nil {
diff --git a/store/mockstore/unistore/cophandler/mpp_exec.go b/store/mockstore/unistore/cophandler/mpp_exec.go
index 3e164da24d458..d0d0a71d5b85a 100644
--- a/store/mockstore/unistore/cophandler/mpp_exec.go
+++ b/store/mockstore/unistore/cophandler/mpp_exec.go
@@ -360,7 +360,7 @@ func (e *indexScanExec) open() error {
 
 func (e *indexScanExec) next() (*chunk.Chunk, error) {
 	if e.chkIdx < len(e.chunks) {
-		e.chkIdx += 1
+		e.chkIdx++
 		e.execSummary.updateOnlyRows(e.chunks[e.chkIdx-1].NumRows())
 		if e.paging != nil {
 			if e.desc {
diff --git a/store/mockstore/unistore/tikv/detector.go b/store/mockstore/unistore/tikv/detector.go
index a804ec4bfe27a..bbff15e42034b 100644
--- a/store/mockstore/unistore/tikv/detector.go
+++ b/store/mockstore/unistore/tikv/detector.go
@@ -203,7 +203,6 @@ func (d *Detector) CleanUpWaitFor(txn, waitForTxn, keyHash uint64) {
 		}
 	}
 	d.lock.Unlock()
-
 }
 
 // activeExpire removes expired entries, should be called under d.lock protection
diff --git a/table/tables/mutation_checker.go b/table/tables/mutation_checker.go
index b3d304f5389d7..6a04b59d0d3f9 100644
--- a/table/tables/mutation_checker.go
+++ b/table/tables/mutation_checker.go
@@ -373,7 +373,7 @@ func getOrBuildColumnMaps(
 		for _, col := range t.Meta().Columns {
 			maps.ColumnIDToInfo[col.ID] = col
-			maps.ColumnIDToFieldType[col.ID] = &col.FieldType
+			maps.ColumnIDToFieldType[col.ID] = &(col.FieldType)
 		}
 
 		for _, index := range t.Indices() {
 			if index.Meta().Primary && t.meta.IsCommonHandle {
@@ -412,7 +412,7 @@ func corruptMutations(t *TableCommon, txn kv.Transaction, sh kv.StagingHandle, c
 		indexMutation := indexMutations[0]
 		key := make([]byte, len(indexMutation.key))
 		copy(key, indexMutation.key)
-		key[len(key)-1] += 1
+		key[len(key)-1]++
 		if len(indexMutation.value) == 0 {
 			if err := memBuffer.Delete(key); err != nil {
 				return errors.Trace(err)
@@ -438,7 +438,7 @@ func corruptMutations(t *TableCommon, txn kv.Transaction, sh kv.StagingHandle, c
 		indexMutation := indexMutations[0]
 		key := indexMutation.key
 		memBuffer.RemoveFromBuffer(key)
-		key[len(key)-1] += 1
+		key[len(key)-1]++
 		if len(indexMutation.value) == 0 {
 			if err := memBuffer.Delete(key); err != nil {
 				return errors.Trace(err)
@@ -459,7 +459,7 @@ func corruptMutations(t *TableCommon, txn kv.Transaction, sh kv.StagingHandle, c
 		indexMutation := indexMutations[0]
 		value := indexMutation.value
 		if len(value) > 0 {
-			value[len(value)-1] += 1
+			value[len(value)-1]++
 			if err := memBuffer.Set(indexMutation.key, value); err != nil {
 				return errors.Trace(err)
 			}
diff --git a/table/tables/partition_test.go b/table/tables/partition_test.go
index 7efddb2052c06..9d88b82b712c0 100644
--- a/table/tables/partition_test.go
+++ b/table/tables/partition_test.go
@@ -686,7 +686,6 @@ func TestIssue31629(t *testing.T) {
 	}
 
 	for i, tt := range tests {
-
 		createTable := "create table t1 " + tt.create
 		res, err := tk.Exec(createTable)
 		if res != nil {
diff --git a/table/tables/tables.go b/table/tables/tables.go
index 9b35a1af8cc3b..17175f170bb24 100644
--- a/table/tables/tables.go
+++ b/table/tables/tables.go
@@ -1477,7 +1477,7 @@ func AllocHandle(ctx context.Context, sctx sessionctx.Context, t table.Table) (k
 	if stmtCtx := sctx.GetSessionVars().StmtCtx; stmtCtx != nil {
 		// First try to alloc if the statement has reserved auto ID.
 		if stmtCtx.BaseRowID < stmtCtx.MaxRowID {
-			stmtCtx.BaseRowID += 1
+			stmtCtx.BaseRowID++
 			return kv.IntHandle(stmtCtx.BaseRowID), nil
 		}
 	}
diff --git a/testkit/testdata/testdata.go b/testkit/testdata/testdata.go
index 17b647346709b..d455e02babe73 100644
--- a/testkit/testdata/testdata.go
+++ b/testkit/testdata/testdata.go
@@ -128,8 +128,8 @@ func ConvertSQLWarnToStrings(warns []stmtctx.SQLWarn) (rs []string) {
 	return rs
 }
 
-// GetTestCases gets the test cases for a test function.
-func (td *TestData) GetTestCases(t *testing.T, in interface{}, out interface{}) {
+// LoadTestCases loads the test cases for a test function.
+func (td *TestData) LoadTestCases(t *testing.T, in interface{}, out interface{}) {
 	// Extract caller's name.
 	pc, _, _, ok := runtime.Caller(1)
 	require.True(t, ok)
@@ -155,8 +155,8 @@ func (td *TestData) GetTestCases(t *testing.T, in interface{}, out interface{})
 	td.output[casesIdx].decodedOut = out
 }
 
-// GetTestCasesByName gets the test cases for a test function by its name.
-func (td *TestData) GetTestCasesByName(caseName string, t *testing.T, in interface{}, out interface{}) {
+// LoadTestCasesByName loads the test cases for a test function by its name.
+func (td *TestData) LoadTestCasesByName(caseName string, t *testing.T, in interface{}, out interface{}) {
 	casesIdx, ok := td.funcMap[caseName]
 	require.Truef(t, ok, "Case name: %s", caseName)
 	require.NoError(t, json.Unmarshal(*td.input[casesIdx].Cases, in))
diff --git a/testkit/testkit.go b/testkit/testkit.go
index 4b47dd61f4b96..d92163b8f8fbe 100644
--- a/testkit/testkit.go
+++ b/testkit/testkit.go
@@ -143,7 +143,6 @@ func (tk *TestKit) MustPartitionByList(sql string, partitions []string, args ...
 				partitions = append(partitions[:index], partitions[index+1:]...)
 			}
 		}
-
 	}
 	if !ok {
 		tk.require.Len(partitions, 0)
diff --git a/tidb-binlog/node/registry_test.go b/tidb-binlog/node/registry_test.go
index 4991d053afc95..7e5f126f35031 100644
--- a/tidb-binlog/node/registry_test.go
+++ b/tidb-binlog/node/registry_test.go
@@ -81,13 +81,6 @@ func TestRegisterNode(t *testing.T) {
 	err = r.UpdateNode(context.Background(), nodePrefix, ns)
 	require.NoError(t, err)
 	mustEqualStatus(t, r, ns.NodeID, ns)
-
-	// TODO: now don't have function to delete node, maybe do it later
-	//err = r.UnregisterNode(context.Background(), nodePrefix, ns.NodeID)
-	//require.NoError(t, err)
-	//exist, err := r.checkNodeExists(context.Background(), nodePrefix, ns.NodeID)
-	//require.NoError(t, err)
-	//require.False(t, exist)
 }
 
 func TestRefreshNode(t *testing.T) {
@@ -109,11 +102,6 @@ func TestRefreshNode(t *testing.T) {
 
 	ns.IsAlive = true
 	mustEqualStatus(t, r, ns.NodeID, ns)
-
-	// TODO: fix it later
-	//time.Sleep(2 * time.Second)
-	//ns.IsAlive = false
-	//mustEqualStatus(t, r, ns.NodeID, ns)
 }
 
 func mustEqualStatus(t *testing.T, r RegisrerTestClient, nodeID string, status *Status) {
diff --git a/types/const_test.go b/types/const_test.go
index 62a4d2183eae1..c97639f9ab9c2 100644
--- a/types/const_test.go
+++ b/types/const_test.go
@@ -392,7 +392,6 @@ func TestIgnoreSpaceMode(t *testing.T) {
 	require.Error(t, err)
 	tk.MustExec("CREATE TABLE test.NOW(a bigint);")
 	tk.MustExec("DROP TABLE NOW;")
-
 }
 
 func TestNoBackslashEscapesMode(t *testing.T) {
diff --git a/types/convert_test.go b/types/convert_test.go
index 88a60171ed3ad..f11daa6d22acc 100644
--- a/types/convert_test.go
+++ b/types/convert_test.go
@@ -904,7 +904,7 @@ func TestGetValidInt(t *testing.T) {
 		if tt.warning {
 			require.Lenf(t, warnings, warningCount+1, "%d", i)
 			require.True(t, terror.ErrorEqual(warnings[len(warnings)-1].Err, ErrTruncatedWrongVal))
-			warningCount += 1
+			warningCount++
 		} else {
 			require.Len(t, warnings, warningCount)
 		}
diff --git a/types/json/binary_test.go b/types/json/binary_test.go
index 4142c9cdf150f..d14fcc5416db5 100644
--- a/types/json/binary_test.go
+++ b/types/json/binary_test.go
@@ -479,7 +479,6 @@ func TestCreateBinary(t *testing.T) {
 		bj = CreateBinary(int8(123))
 		require.Equal(t, bj.TypeCode, bj.TypeCode)
 	}()
-
 }
 
 func TestFunctions(t *testing.T) {
diff --git a/types/time_test.go b/types/time_test.go
index 13e9191ec4175..af88a18ea1fcf 100644
--- a/types/time_test.go
+++ b/types/time_test.go
@@ -1679,7 +1679,6 @@ func TestParseDurationValue(t *testing.T) {
 			require.True(t, col.err.Equal(err))
 		}
 	}
-
 }
 
 func TestIsClockUnit(t *testing.T) {
@@ -2084,7 +2083,6 @@ func TestFromGoTime(t *testing.T) {
 		t1 := types.FromGoTime(v)
 		require.Equalf(t, types.FromDate(ca.yy, ca.mm, ca.dd, ca.hh, ca.min, ca.sec, ca.micro), t1, "idx %d", ith)
 	}
-
 }
 
 func TestGetTimezone(t *testing.T) {
diff --git a/util/chunk/chunk_test.go b/util/chunk/chunk_test.go
index f327ad704b268..413974acc30b6 100644
--- a/util/chunk/chunk_test.go
+++ b/util/chunk/chunk_test.go
@@ -538,7 +538,6 @@ func TestCopyTo(t *testing.T) {
 			cmpFunc := GetCompareFunc(allTypes[i])
 			require.Zero(t, cmpFunc(row, i, r1, i))
 		}
-
 	}
 }
 
diff --git a/util/chunk/column_test.go b/util/chunk/column_test.go
index 4ef6d2f2a1526..98dd20322c5c6 100644
--- a/util/chunk/column_test.go
+++ b/util/chunk/column_test.go
@@ -271,7 +271,6 @@ func TestMyDecimal(t *testing.T) {
 		types.DecimalAdd(&ds[i], d, &ds[i])
 
 		require.NoError(t, err)
-
 	}
 
 	it := NewIterator4Chunk(chk)
diff --git a/util/dbutil/common_test.go b/util/dbutil/common_test.go
index b306855d1ba1e..3e2079e448912 100644
--- a/util/dbutil/common_test.go
+++ b/util/dbutil/common_test.go
@@ -50,7 +50,6 @@ func TestReplacePlaceholder(t *testing.T) {
 		str := ReplacePlaceholder(testCase.originStr, testCase.args)
 		require.Equal(t, testCase.expectStr, str)
 	}
-
 }
 
 func TestTableName(t *testing.T) {
diff --git a/util/deadlockhistory/deadlock_history_test.go b/util/deadlockhistory/deadlock_history_test.go
index c7bde7728fa65..94ed2e893a7d4 100644
--- a/util/deadlockhistory/deadlock_history_test.go
+++ b/util/deadlockhistory/deadlock_history_test.go
@@ -46,7 +46,6 @@ func getAllDatum(d *DeadlockHistory, columns []*model.ColumnInfo) [][]types.Datu
 	}
 
 	return rows
-
 }
 
 func TestDeadlockHistoryCollection(t *testing.T) {
diff --git a/util/expensivequery/memory_usage_alarm.go b/util/expensivequery/memory_usage_alarm.go
index 8ce19c6264640..985a51b79aeb8 100644
--- a/util/expensivequery/memory_usage_alarm.go
+++ b/util/expensivequery/memory_usage_alarm.go
@@ -220,34 +220,44 @@ func (record *memoryUsageAlarm) recordSQL(sm util.SessionManager) {
 	})
 }
 
+type item struct {
+	Name  string
+	Debug int
+}
+
 func (record *memoryUsageAlarm) recordProfile() {
-	items := []struct {
-		name  string
-		debug int
-	}{
-		{name: "heap"},
-		{name: "goroutine", debug: 2},
+	items := []item{
+		{Name: "heap"},
+		{Name: "goroutine", Debug: 2},
 	}
 	for i, item := range items {
-		fileName := filepath.Join(record.tmpDir, item.name+record.lastCheckTime.Format(time.RFC3339))
-		record.lastProfileFileName[i] = append(record.lastProfileFileName[i], fileName)
-		f, err := os.Create(fileName)
+		err := record.write(i, item)
 		if err != nil {
-			logutil.BgLogger().Error(fmt.Sprintf("create %v profile file fail", item.name), zap.Error(err))
 			return
 		}
-		//nolint: revive
-		defer func() {
-			err := f.Close()
-			if err != nil {
-				logutil.BgLogger().Error(fmt.Sprintf("close %v profile file fail", item.name), zap.Error(err))
-			}
-		}()
-		p := rpprof.Lookup(item.name)
-		err = p.WriteTo(f, item.debug)
+	}
+}
+
+func (record *memoryUsageAlarm) write(i int, item item) error {
+	fileName := filepath.Join(record.tmpDir, item.Name+record.lastCheckTime.Format(time.RFC3339))
+	record.lastProfileFileName[i] = append(record.lastProfileFileName[i], fileName)
+	f, err := os.Create(fileName)
+	if err != nil {
+		logutil.BgLogger().Error(fmt.Sprintf("create %v profile file fail", item.Name), zap.Error(err))
+		return err
+	}
+	//nolint: revive
+	defer func() {
+		err := f.Close()
 		if err != nil {
-			logutil.BgLogger().Error(fmt.Sprintf("write %v profile file fail", item.name), zap.Error(err))
-			return
+			logutil.BgLogger().Error(fmt.Sprintf("close %v profile file fail", item.Name), zap.Error(err))
 		}
+	}()
+	p := rpprof.Lookup(item.Name)
+	err = p.WriteTo(f, item.Debug)
+	if err != nil {
+		logutil.BgLogger().Error(fmt.Sprintf("write %v profile file fail", item.Name), zap.Error(err))
+		return err
 	}
+	return nil
 }
diff --git a/util/fastrand/random_test.go b/util/fastrand/random_test.go
index 6eef9eb11ac26..ad2cba5d2c90f 100644
--- a/util/fastrand/random_test.go
+++ b/util/fastrand/random_test.go
@@ -35,7 +35,7 @@ func TestRand(t *testing.T) {
 	}
 	sum := 0
 	for i := 0; i < 256; i++ {
-		if arr[i] == false {
+		if !arr[i] {
 			sum++
 		}
 	}
diff --git a/util/generatedexpr/generated_expr.go b/util/generatedexpr/generated_expr.go
index 1af0fb1adac7b..aff83341cfd6d 100644
--- a/util/generatedexpr/generated_expr.go
+++ b/util/generatedexpr/generated_expr.go
@@ -39,7 +39,7 @@ func (*nameResolver) Enter(inNode ast.Node) (ast.Node, bool) {
 
 // Leave implements ast.Visitor interface.
 func (nr *nameResolver) Leave(inNode ast.Node) (node ast.Node, ok bool) {
-	//nolint:revive
+	//nolint: revive,all_revive
 	switch v := inNode.(type) {
 	case *ast.ColumnNameExpr:
 		for _, col := range nr.tableInfo.Columns {
diff --git a/util/localpool/localpool_test.go b/util/localpool/localpool_test.go
index 1088457c95090..cd338e6f7964e 100644
--- a/util/localpool/localpool_test.go
+++ b/util/localpool/localpool_test.go
@@ -57,14 +57,3 @@ func TestPool(t *testing.T) {
 	require.Greater(t, getHit, getMiss)
 	require.Greater(t, putHit, putMiss)
 }
-
-func GetAndPut(pool *LocalPool) {
-	objs := make([]interface{}, rand.Intn(4)+1)
-	for i := 0; i < len(objs); i++ {
-		objs[i] = pool.Get()
-	}
-	runtime.Gosched()
-	for _, obj := range objs {
-		pool.Put(obj)
-	}
-}
diff --git a/util/memory/tracker_test.go b/util/memory/tracker_test.go
index 58e4395470994..5425bb032162d 100644
--- a/util/memory/tracker_test.go
+++ b/util/memory/tracker_test.go
@@ -448,7 +448,6 @@ func TestGlobalTracker(t *testing.T) {
 	}()
 	c2.AttachTo(commonTracker)
 	c2.DetachFromGlobalTracker()
-
 }
 
 func parseByteUnit(str string) (int64, error) {
diff --git a/util/misc.go b/util/misc.go
index cad205c1c746f..4a076cca04ddd 100644
--- a/util/misc.go
+++ b/util/misc.go
@@ -102,7 +102,8 @@ func WithRecovery(exec func(), recoverFn func(r interface{})) {
 // recoverFn: Handler will be called after recover and before dump stack, passing `nil` means noop.
 // quit: If this value is true, the current program exits after recovery.
 func Recover(metricsLabel, funcInfo string, recoverFn func(), quit bool) {
-	r := recover() //nolint: revive
+	//nolint: revive
+	r := recover()
 	if r == nil {
 		return
 	}
diff --git a/util/ranger/ranger_test.go b/util/ranger/ranger_test.go
index 9a35b94140a80..db16379f53d8a 100644
--- a/util/ranger/ranger_test.go
+++ b/util/ranger/ranger_test.go
@@ -879,7 +879,7 @@ func TestCompIndexInExprCorrCol(t *testing.T) {
 		SQL    string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -906,7 +906,7 @@ func TestIndexStringIsTrueRange(t *testing.T) {
 		SQL    string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -933,7 +933,7 @@ func TestCompIndexDNFMatch(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -963,7 +963,7 @@ func TestCompIndexMultiColDNF1(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -993,7 +993,7 @@ func TestCompIndexMultiColDNF2(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -1021,7 +1021,7 @@ func TestPrefixIndexMultiColDNF(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	inputLen := len(input)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
@@ -1060,7 +1060,7 @@ func TestIndexRangeForBit(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -1317,7 +1317,7 @@ func TestIndexRangeForDecimal(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -1351,7 +1351,7 @@ func TestPrefixIndexAppendPointRanges(t *testing.T) {
 		Plan   []string
 		Result []string
 	}
-	rangerSuiteData.GetTestCases(t, &input, &output)
+	rangerSuiteData.LoadTestCases(t, &input, &output)
 	for i, tt := range input {
 		testdata.OnRecord(func() {
 			output[i].SQL = tt
@@ -1988,7 +1988,6 @@ func TestTableShardIndex(t *testing.T) {
 }
 
 func TestShardIndexFuncSuites(t *testing.T) {
-
 	store, clean := testkit.CreateMockStore(t)
 	defer clean()
 	testKit := testkit.NewTestKit(t, store)
diff --git a/util/schemacmp/lattice_test.go b/util/schemacmp/lattice_test.go
index 28b4e63f7453d..fa0a1e546b6a7 100644
--- a/util/schemacmp/lattice_test.go
+++ b/util/schemacmp/lattice_test.go
@@ -550,7 +550,6 @@ func TestCompatibilities(t *testing.T) {
 	}
 
 	for _, tc := range testCases {
-
 		cmp, err := tc.a.Compare(tc.b)
 		if len(tc.compareError) != 0 {
 			require.IsType(t, &IncompatibleError{}, err)
diff --git a/util/schemacmp/type_test.go b/util/schemacmp/type_test.go
index 044a19635429a..8345b2c7c8b57 100644
--- a/util/schemacmp/type_test.go
+++ b/util/schemacmp/type_test.go
@@ -232,7 +232,6 @@ func TestTypeCompareJoin(t *testing.T) {
 				t.Log(cmp)
 			}
 			require.Regexp(t, tc.compareError, err)
-
 		} else {
 			require.NoError(t, err)
 			require.Equal(t, tc.compareResult, cmp)
diff --git a/util/sqlexec/utils_test.go b/util/sqlexec/utils_test.go
index 7ca0e7e468898..c3ed1d433331a 100644
--- a/util/sqlexec/utils_test.go
+++ b/util/sqlexec/utils_test.go
@@ -401,7 +401,6 @@ func TestEscapeSQL(t *testing.T) {
 			require.Equal(t, v.output, r2)
 			require.NoError(t, e3)
 			require.Equal(t, v.output, r3.String())
-
 		} else {
 			require.Error(t, e1)
 			require.Regexp(t, v.err, e1.Error())
diff --git a/util/stmtsummary/statement_summary_test.go b/util/stmtsummary/statement_summary_test.go
index a883c4aa51c28..516af9df23b95 100644
--- a/util/stmtsummary/statement_summary_test.go
+++ b/util/stmtsummary/statement_summary_test.go
@@ -272,7 +272,7 @@ func TestAddStatement(t *testing.T) {
 	expectedSummaryElement.maxPrewriteRegionNum = stmtExecInfo2.ExecDetail.CommitDetail.PrewriteRegionNum
 	expectedSummaryElement.sumTxnRetry += int64(stmtExecInfo2.ExecDetail.CommitDetail.TxnRetry)
 	expectedSummaryElement.maxTxnRetry = stmtExecInfo2.ExecDetail.CommitDetail.TxnRetry
-	expectedSummaryElement.sumBackoffTimes += 1
+	expectedSummaryElement.sumBackoffTimes++
 	expectedSummaryElement.backoffTypes[boTxnLockName] = 1
 	expectedSummaryElement.sumMem += stmtExecInfo2.MemMax
 	expectedSummaryElement.maxMem = stmtExecInfo2.MemMax
@@ -387,7 +387,7 @@ func TestAddStatement(t *testing.T) {
 	expectedSummaryElement.sumWriteSize += int64(stmtExecInfo3.ExecDetail.CommitDetail.WriteSize)
 	expectedSummaryElement.sumPrewriteRegionNum += int64(stmtExecInfo3.ExecDetail.CommitDetail.PrewriteRegionNum)
 	expectedSummaryElement.sumTxnRetry += int64(stmtExecInfo3.ExecDetail.CommitDetail.TxnRetry)
-	expectedSummaryElement.sumBackoffTimes += 1
+	expectedSummaryElement.sumBackoffTimes++
 	expectedSummaryElement.backoffTypes[boTxnLockName] = 2
 	expectedSummaryElement.sumMem += stmtExecInfo3.MemMax
 	expectedSummaryElement.sumDisk += stmtExecInfo3.DiskMax
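
The recurring revive fixes in this patch reduce to two patterns: the increment-decrement rule (`x += 1` / `x -= 1` become `x++` / `x--`, and `== true` / `== false` comparisons are dropped), and the defer-in-loop rule, fixed either by extracting the loop body into a named helper (as in util/expensivequery/memory_usage_alarm.go) or by wrapping it in an immediately invoked closure (as in TestIncrementalModifyCountUpdate). What follows is a minimal standalone sketch of the defer-in-loop fix; it is not taken from the TiDB tree, and the names writeAll, writeOne, and paths are illustrative only.

	package main

	import (
		"fmt"
		"os"
		"path/filepath"
	)

	// writeAll iterates over paths. Putting `defer f.Close()` directly in
	// this loop would keep every file open until writeAll returns, which is
	// what revive's defer check flags; extracting the body into writeOne
	// scopes each defer to a single iteration.
	func writeAll(paths []string) {
		written := 0
		for _, p := range paths {
			if err := writeOne(p); err != nil {
				return
			}
			written++ // revive: use x++ rather than x += 1
		}
		fmt.Println("files written:", written)
	}

	// writeOne creates one file and closes it when this call returns.
	func writeOne(path string) error {
		f, err := os.Create(path)
		if err != nil {
			return err
		}
		defer func() {
			if cerr := f.Close(); cerr != nil {
				fmt.Println("close failed:", cerr)
			}
		}()
		_, err = f.WriteString("data\n")
		return err
	}

	func main() {
		dir := os.TempDir()
		writeAll([]string{filepath.Join(dir, "a.txt"), filepath.Join(dir, "b.txt")})
	}

The closure variant used in the test works the same way: invoking `func(x T) { ... }(x)` as the loop body gives each iteration its own function scope, so a `defer clean()` inside it runs at the end of that iteration rather than at the end of the whole test.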