diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index 4df6788fa69..d64b903faaa 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -1,4 +1,5 @@ + ## Related Issue(s) - + -- ## Checklist -- [ ] Should this PR be backported? - [ ] Tests were added or are not required - [ ] Documentation was added or is not required ## Deployment Notes - - -## Impacted Areas in Vitess -Components that this PR will affect: - -- [ ] Query Serving -- [ ] VReplication -- [ ] Cluster Management -- [ ] Build/CI -- [ ] VTAdmin + \ No newline at end of file diff --git a/.github/workflows/check_formatting.yml b/.github/workflows/check_formatting.yml index 9c64b18ced4..7801f8e883d 100644 --- a/.github/workflows/check_formatting.yml +++ b/.github/workflows/check_formatting.yml @@ -4,13 +4,23 @@ jobs: build: name: Check Formatting - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
- name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/check_make_parser.yml b/.github/workflows/check_make_parser.yml index c856da3e580..18860b94dbf 100644 --- a/.github/workflows/check_make_parser.yml +++ b/.github/workflows/check_make_parser.yml @@ -4,13 +4,17 @@ jobs: build: name: Check Make Parser - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - name: Check out code uses: actions/checkout@v2 @@ -18,11 +22,8 @@ jobs: - name: Get dependencies run: | sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget - sudo service mysql stop + sudo apt-get install -y make unzip g++ etcd curl git wget sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld go mod download - name: Run make minimaltools diff --git a/.github/workflows/check_make_sizegen.yml b/.github/workflows/check_make_sizegen.yml index b8eef13acf4..24e08c996f4 100644 --- a/.github/workflows/check_make_sizegen.yml +++ b/.github/workflows/check_make_sizegen.yml @@ -6,30 +6,31 @@ jobs: name: Check Make Sizegen runs-on: ubuntu-latest steps: + + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 - - name: Set up Go - uses: actions/setup-go@v1 - with: - go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - - name: Check out code - uses: actions/checkout@v2 + - name: Check out code + uses: actions/checkout@v2 - - name: Get dependencies - run: | - sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget - sudo service mysql stop - sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld 
/etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld - go mod download + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y make unzip g++ etcd curl git wget + sudo service etcd stop + go mod download - - name: Run make minimaltools - run: | - make minimaltools + - name: Run make minimaltools + run: | + make minimaltools - - name: check_make_sizegen - run: | - tools/check_make_sizegen.sh + - name: check_make_sizegen + run: | + tools/check_make_sizegen.sh diff --git a/.github/workflows/check_make_visitor.yml b/.github/workflows/check_make_visitor.yml index dbb1e42d3de..2d0f0405a65 100644 --- a/.github/workflows/check_make_visitor.yml +++ b/.github/workflows/check_make_visitor.yml @@ -4,13 +4,17 @@ jobs: build: name: Check Make Visitor - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - name: Check out code uses: actions/checkout@v2 @@ -18,11 +22,8 @@ jobs: - name: Get dependencies run: | sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget - sudo service mysql stop + sudo apt-get install -y make unzip g++ etcd curl git wget sudo service etcd stop - sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld go mod download - name: Run make minimaltools diff --git a/.github/workflows/cluster_endtoend_11.yml b/.github/workflows/cluster_endtoend_11.yml index 5ec5d45cc72..4b1b7c2559c 100644 --- a/.github/workflows/cluster_endtoend_11.yml +++ b/.github/workflows/cluster_endtoend_11.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_12.yml 
b/.github/workflows/cluster_endtoend_12.yml index 77934b909bd..b28b03b45e1 100644 --- a/.github/workflows/cluster_endtoend_12.yml +++ b/.github/workflows/cluster_endtoend_12.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_13.yml b/.github/workflows/cluster_endtoend_13.yml index c3604cef464..5ff94e79f10 100644 --- a/.github/workflows/cluster_endtoend_13.yml +++ b/.github/workflows/cluster_endtoend_13.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_14.yml b/.github/workflows/cluster_endtoend_14.yml index e5873512672..455540c3347 100644 --- a/.github/workflows/cluster_endtoend_14.yml +++ b/.github/workflows/cluster_endtoend_14.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_15.yml b/.github/workflows/cluster_endtoend_15.yml index 0661e084b26..62ce1d475c0 100644 --- a/.github/workflows/cluster_endtoend_15.yml +++ b/.github/workflows/cluster_endtoend_15.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_16.yml b/.github/workflows/cluster_endtoend_16.yml index a21072bf604..ee9b34d7ee2 100644 --- a/.github/workflows/cluster_endtoend_16.yml +++ b/.github/workflows/cluster_endtoend_16.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_17.yml b/.github/workflows/cluster_endtoend_17.yml index 38944c257f1..7412da90628 100644 --- a/.github/workflows/cluster_endtoend_17.yml +++ 
b/.github/workflows/cluster_endtoend_17.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_18.yml b/.github/workflows/cluster_endtoend_18.yml index 85b25419b48..04384af9a57 100644 --- a/.github/workflows/cluster_endtoend_18.yml +++ b/.github/workflows/cluster_endtoend_18.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_19.yml b/.github/workflows/cluster_endtoend_19.yml index 582b7db0227..31c708e6a93 100644 --- a/.github/workflows/cluster_endtoend_19.yml +++ b/.github/workflows/cluster_endtoend_19.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_20.yml b/.github/workflows/cluster_endtoend_20.yml index d53175a88f3..a35a7ee69fa 100644 --- a/.github/workflows/cluster_endtoend_20.yml +++ b/.github/workflows/cluster_endtoend_20.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_21.yml b/.github/workflows/cluster_endtoend_21.yml index 10f52f04178..04b385de829 100644 --- a/.github/workflows/cluster_endtoend_21.yml +++ b/.github/workflows/cluster_endtoend_21.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_22.yml b/.github/workflows/cluster_endtoend_22.yml index c1bcb675bae..341da50cac6 100644 --- a/.github/workflows/cluster_endtoend_22.yml +++ b/.github/workflows/cluster_endtoend_22.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + 
go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_23.yml b/.github/workflows/cluster_endtoend_23.yml index f7105bc96cd..a2910ee2f2a 100644 --- a/.github/workflows/cluster_endtoend_23.yml +++ b/.github/workflows/cluster_endtoend_23.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_24.yml b/.github/workflows/cluster_endtoend_24.yml index ef3eaa00ce8..f56c3b89dc3 100644 --- a/.github/workflows/cluster_endtoend_24.yml +++ b/.github/workflows/cluster_endtoend_24.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_26.yml b/.github/workflows/cluster_endtoend_26.yml index fdee5988cf0..ec5cdce36b9 100644 --- a/.github/workflows/cluster_endtoend_26.yml +++ b/.github/workflows/cluster_endtoend_26.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_mysql80.yml b/.github/workflows/cluster_endtoend_mysql80.yml index 4f075015c7e..a0b01bf7f4a 100644 --- a/.github/workflows/cluster_endtoend_mysql80.yml +++ b/.github/workflows/cluster_endtoend_mysql80.yml @@ -5,13 +5,24 @@ on: [push, pull_request] jobs: build: + name: Run endtoend tests on Cluster (mysql80) runs-on: ubuntu-20.04 steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk 
'{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml index 3a7381b37f2..2108309d473 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_declarative.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_declarative.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml index 22b049e9347..87570937929 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_ghost.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_ghost.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_onlineddl_revert.yml b/.github/workflows/cluster_endtoend_onlineddl_revert.yml index a6937a9eb85..0f423454f60 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_revert.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_revert.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_onlineddl_singleton.yml b/.github/workflows/cluster_endtoend_onlineddl_singleton.yml new file mode 100644 index 00000000000..ba5681efc27 --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_singleton.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_singleton) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_singleton) + 
runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_singleton diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml index 5bd8a46a61c..0af5dd110c9 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml 
b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml index 56dd0231a4b..da4461bd75f 100644 --- a/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_stress.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml new file mode 100644 index 00000000000..170ea0a38fd --- /dev/null +++ b/.github/workflows/cluster_endtoend_onlineddl_vrepl_suite.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (onlineddl_vrepl_suite) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (onlineddl_vrepl_suite) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard onlineddl_vrepl_suite diff --git a/.github/workflows/cluster_endtoend_resharding.yml b/.github/workflows/cluster_endtoend_resharding.yml new file mode 100644 index 00000000000..53af8a07030 --- /dev/null +++ b/.github/workflows/cluster_endtoend_resharding.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (resharding) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (resharding) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard resharding diff --git a/.github/workflows/cluster_endtoend_resharding_bytes.yml b/.github/workflows/cluster_endtoend_resharding_bytes.yml new file mode 100644 index 00000000000..edec3162c4e --- /dev/null +++ b/.github/workflows/cluster_endtoend_resharding_bytes.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (resharding_bytes) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (resharding_bytes) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard resharding_bytes diff --git a/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml new file mode 100644 index 00000000000..5877c6462ed --- /dev/null +++ b/.github/workflows/cluster_endtoend_tabletmanager_tablegc.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (tabletmanager_tablegc) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (tabletmanager_tablegc) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard tabletmanager_tablegc diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml index 7caa3ccde42..e41b646eeba 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml index 875efbed6da..ac79b6a1d5b 100644 --- a/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml +++ b/.github/workflows/cluster_endtoend_tabletmanager_throttler_custom_config.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_upgrade.yml b/.github/workflows/cluster_endtoend_upgrade.yml index 85528a34425..306983854ec 100644 --- 
a/.github/workflows/cluster_endtoend_upgrade.yml +++ b/.github/workflows/cluster_endtoend_upgrade.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | @@ -32,12 +32,32 @@ jobs: run: | # This prepares general purpose binary dependencies # as well as v9.0.0 specific go modules + + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Install mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + # Install everything else we need, and configure + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata sudo service mysql stop sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" go mod download wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb diff --git a/.github/workflows/cluster_endtoend_vreplication_basic.yml
b/.github/workflows/cluster_endtoend_vreplication_basic.yml index 427d8675d06..8f4bd5b9e71 100644 --- a/.github/workflows/cluster_endtoend_vreplication_basic.yml +++ b/.github/workflows/cluster_endtoend_vreplication_basic.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml index efdf95423d6..b0ff10e203b 100644 --- a/.github/workflows/cluster_endtoend_vreplication_cellalias.yml +++ b/.github/workflows/cluster_endtoend_vreplication_cellalias.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_vreplication_migrate.yml b/.github/workflows/cluster_endtoend_vreplication_migrate.yml index 1e9f02b0153..acef7c1bca6 100644 --- a/.github/workflows/cluster_endtoend_vreplication_migrate.yml +++ b/.github/workflows/cluster_endtoend_vreplication_migrate.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_vreplication_multicell.yml b/.github/workflows/cluster_endtoend_vreplication_multicell.yml index 72c92ebf610..3250217f4fd 100644 --- a/.github/workflows/cluster_endtoend_vreplication_multicell.yml +++ b/.github/workflows/cluster_endtoend_vreplication_multicell.yml @@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_vreplication_v2.yml b/.github/workflows/cluster_endtoend_vreplication_v2.yml index a80745f35c7..c5337e4cc1d 100644 --- a/.github/workflows/cluster_endtoend_vreplication_v2.yml +++ b/.github/workflows/cluster_endtoend_vreplication_v2.yml 
@@ -12,7 +12,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/cluster_endtoend_vtgate_buffer.yml b/.github/workflows/cluster_endtoend_vtgate_buffer.yml new file mode 100644 index 00000000000..93c4340c24a --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_buffer.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_buffer) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_buffer) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_buffer diff --git a/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml new file mode 100644 index 00000000000..299a01fad76 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_concurrentdml.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_concurrentdml) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_concurrentdml) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_concurrentdml diff --git a/.github/workflows/cluster_endtoend_vtgate_gen4.yml b/.github/workflows/cluster_endtoend_vtgate_gen4.yml new file mode 100644 index 00000000000..433a1e8c5b7 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_gen4.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_gen4) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_gen4) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_gen4 diff --git a/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml new file mode 100644 index 00000000000..7ce88438886 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_readafterwrite.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_readafterwrite) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_readafterwrite) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_readafterwrite diff --git a/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml new file mode 100644 index 00000000000..593432bce46 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_reservedconn.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_reservedconn) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_reservedconn) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_reservedconn diff --git a/.github/workflows/cluster_endtoend_vtgate_schema.yml b/.github/workflows/cluster_endtoend_vtgate_schema.yml new file mode 100644 index 00000000000..07a47fd64ff --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_schema.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_schema) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_schema) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_schema diff --git a/.github/workflows/cluster_endtoend_vtgate_topo.yml b/.github/workflows/cluster_endtoend_vtgate_topo.yml new file mode 100644 index 00000000000..bd6a7d9f0dd --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_topo.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_topo) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_topo) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_topo diff --git a/.github/workflows/cluster_endtoend_vtgate_transaction.yml b/.github/workflows/cluster_endtoend_vtgate_transaction.yml new file mode 100644 index 00000000000..e8db2e3c009 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_transaction.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_transaction) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_transaction) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_transaction diff --git a/.github/workflows/cluster_endtoend_vtgate_unsharded.yml b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml new file mode 100644 index 00000000000..a739f1f86bb --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_unsharded.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_unsharded) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_unsharded) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_unsharded diff --git a/.github/workflows/cluster_endtoend_vtgate_vindex.yml b/.github/workflows/cluster_endtoend_vtgate_vindex.yml new file mode 100644 index 00000000000..e6d43c31d51 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_vindex.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_vindex) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_vindex) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_vindex diff --git a/.github/workflows/cluster_endtoend_vtgate_vschema.yml b/.github/workflows/cluster_endtoend_vtgate_vschema.yml new file mode 100644 index 00000000000..60360c49781 --- /dev/null +++ b/.github/workflows/cluster_endtoend_vtgate_vschema.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (vtgate_vschema) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (vtgate_vschema) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtgate_vschema diff --git a/.github/workflows/cluster_endtoend_27.yml b/.github/workflows/cluster_endtoend_vtorc.yml similarity index 93% rename from .github/workflows/cluster_endtoend_27.yml rename to .github/workflows/cluster_endtoend_vtorc.yml index dea2e7336b9..98e77b4fe92 100644 --- a/.github/workflows/cluster_endtoend_27.yml +++ b/.github/workflows/cluster_endtoend_vtorc.yml @@ -1,18 +1,18 @@ # DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" -name: Cluster (27) +name: Cluster (vtorc) on: [push, pull_request] jobs: build: - name: Run endtoend tests on Cluster (27) + name: Run endtoend tests on Cluster (vtorc) runs-on: ubuntu-18.04 steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | @@ -47,4 +47,4 @@ jobs: timeout-minutes: 30 run: | source build.env - eatmydata -- go run test.go -docker=false -print-log -follow -shard 27 + eatmydata -- go run test.go -docker=false -print-log -follow -shard vtorc diff --git a/.github/workflows/cluster_endtoend_xb_recovery.yml b/.github/workflows/cluster_endtoend_xb_recovery.yml new file mode 100644 index 00000000000..f56dc126f44 --- 
/dev/null +++ b/.github/workflows/cluster_endtoend_xb_recovery.yml @@ -0,0 +1,50 @@ +# DO NOT MODIFY: THIS FILE IS GENERATED USING "make generate_ci_workflows" + +name: Cluster (xb_recovery) +on: [push, pull_request] +jobs: + + build: + name: Run endtoend tests on Cluster (xb_recovery) + runs-on: ubuntu-18.04 + + steps: + - name: Set up Go + uses: actions/setup-go@v1 + with: + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code + uses: actions/checkout@v2 + + - name: Get dependencies + run: | + sudo apt-get update + sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo service mysql stop + sudo service etcd stop + sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + go mod download + + wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get install -y gnupg2 + sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb + sudo apt-get update + sudo apt-get install percona-xtrabackup-24 + + - name: Run cluster endtoend test + timeout-minutes: 30 + run: | + source build.env + eatmydata -- go run test.go -docker=false -print-log -follow -shard xb_recovery diff --git a/.github/workflows/cluster_initial_sharding_multi.yml b/.github/workflows/cluster_initial_sharding_multi.yml index e6e0ee36a60..fcaa2ccec41 100644 --- a/.github/workflows/cluster_initial_sharding_multi.yml +++ 
b/.github/workflows/cluster_initial_sharding_multi.yml @@ -4,13 +4,13 @@ jobs: build: name: cluster initial sharding multi - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/create_release.yml b/.github/workflows/create_release.yml index f126dd76e35..79ddccbd446 100644 --- a/.github/workflows/create_release.yml +++ b/.github/workflows/create_release.yml @@ -10,13 +10,23 @@ on: jobs: build: name: Create Release - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 + + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
- name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/docker_test_1.yml b/.github/workflows/docker_test_1.yml index aff66aefa73..c3a92f6db0c 100644 --- a/.github/workflows/docker_test_1.yml +++ b/.github/workflows/docker_test_1.yml @@ -4,7 +4,7 @@ jobs: build: name: Docker Test 1 - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: @@ -13,6 +13,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/docker_test_2.yml b/.github/workflows/docker_test_2.yml index 6da16d0ac9a..7fd30c741a3 100644 --- a/.github/workflows/docker_test_2.yml +++ b/.github/workflows/docker_test_2.yml @@ -4,7 +4,7 @@ jobs: build: name: Docker Test 2 - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: @@ -13,6 +13,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/docker_test_3.yml b/.github/workflows/docker_test_3.yml index 7c82dc4efe8..974446ccded 100644 --- a/.github/workflows/docker_test_3.yml +++ b/.github/workflows/docker_test_3.yml @@ -4,7 +4,7 @@ jobs: build: name: Docker Test 3 - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: @@ -13,6 +13,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/e2e_race.yml b/.github/workflows/e2e_race.yml index d1da9666662..6c9901bfdc3 100644 --- a/.github/workflows/e2e_race.yml +++ b/.github/workflows/e2e_race.yml @@ -4,35 +4,50 @@ jobs: build: name: End-to-End Test (Race) - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
- - name: Check out code uses: actions/checkout@v2 - name: Get dependencies run: | + + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Install mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + # Install everything else we need, and configure + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata sudo service mysql stop sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" + go mod download - name: Run make minimaltools diff --git a/.github/workflows/endtoend.yml b/.github/workflows/endtoend.yml index 7e3181aaa4e..e4e568caeca 100644 --- a/.github/workflows/endtoend.yml +++ b/.github/workflows/endtoend.yml @@ -10,7 +10,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | @@ -46,4 +46,4 @@ jobs: - name: endtoend timeout-minutes: 30 run: | - tools/e2e_test_runner.sh + eatmydata -- tools/e2e_test_runner.sh diff --git 
a/.github/workflows/ensure_bootstrap_updated.yml b/.github/workflows/ensure_bootstrap_updated.yml index 174995ad838..37f990fcdfb 100644 --- a/.github/workflows/ensure_bootstrap_updated.yml +++ b/.github/workflows/ensure_bootstrap_updated.yml @@ -4,7 +4,7 @@ jobs: build: name: Check Bootstrap Updated - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go @@ -12,6 +12,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/fossa.yml b/.github/workflows/fossa.yml new file mode 100644 index 00000000000..59f15929859 --- /dev/null +++ b/.github/workflows/fossa.yml @@ -0,0 +1,19 @@ +name: FOSSA License Scanning + +on: + - pull_request + - push + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v2 + + - name: Run FOSSA scan and upload build data + uses: fossa-contrib/fossa-action@v1 + with: + fossa-api-key: 76d7483ea206d530d9452e44bffe7ba8 + diff --git a/.github/workflows/golangci-linter.yml b/.github/workflows/golangci-linter.yml index 98bdd56ea7b..0505751aeea 100644 --- a/.github/workflows/golangci-linter.yml +++ b/.github/workflows/golangci-linter.yml @@ -5,23 +5,33 @@ jobs: name: Lint using golangci-lint runs-on: ubuntu-latest steps: - - name: Set up Go 1.15 - uses: actions/setup-go@v1 - with: - go-version: 1.15 - id: go + - name: Set up Go 1.16 + uses: actions/setup-go@v1 + with: + go-version: 1.16 + id: go - - name: Check out code into 
the Go module directory - uses: actions/checkout@v2 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - - name: Install golangci-lint - run: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(go env GOPATH)/bin v1.31.0 + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Clean Env - run: $(go env GOPATH)/bin/golangci-lint cache clean + - name: Check out code into the Go module directory + uses: actions/checkout@v2 - - name: Print linter version - run: $(go env GOPATH)/bin/golangci-lint --version + - name: Install golangci-lint + run: curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s -- -b $(go env GOPATH)/bin v1.31.0 - - name: Run golangci-lint - run: $(go env GOPATH)/bin/golangci-lint run go/... + - name: Clean Env + run: $(go env GOPATH)/bin/golangci-lint cache clean + + - name: Print linter version + run: $(go env GOPATH)/bin/golangci-lint --version + + - name: Run golangci-lint + run: $(go env GOPATH)/bin/golangci-lint run go/... 
diff --git a/.github/workflows/gomod-tidy.yml b/.github/workflows/gomod-tidy.yml index 34dbdd2d5ca..dc3fb1d1e69 100644 --- a/.github/workflows/gomod-tidy.yml +++ b/.github/workflows/gomod-tidy.yml @@ -5,24 +5,34 @@ jobs: name: Check go mod tidy runs-on: ubuntu-latest steps: - - name: Set up Go 1.15 - uses: actions/setup-go@v1 - with: - go-version: 1.15 - id: go + - name: Set up Go 1.16 + uses: actions/setup-go@v1 + with: + go-version: 1.16 + id: go - - name: Check out code into the Go module directory - uses: actions/checkout@v2 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - - name: Run go mod tidy - run: | - set -e - go mod tidy - output=$(git status -s) - if [ -z "${output}" ]; then - exit 0 - fi - echo 'We wish to maintain a tidy state for go mod. Please run `go mod tidy` on your branch, commit and push again.' - echo 'Running `go mod tidy` on this CI test yields with the following changes:' - echo "$output" - exit 1 + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: Check out code into the Go module directory + uses: actions/checkout@v2 + + - name: Run go mod tidy + run: | + set -e + go mod tidy + output=$(git status -s) + if [ -z "${output}" ]; then + exit 0 + fi + echo 'We wish to maintain a tidy state for go mod. Please run `go mod tidy` on your branch, commit and push again.' 
+ echo 'Running `go mod tidy` on this CI test yields with the following changes:' + echo "$output" + exit 1 diff --git a/.github/workflows/legacy_local_example.yml b/.github/workflows/legacy_local_example.yml index 83f680c0406..b7c2f8145f2 100644 --- a/.github/workflows/legacy_local_example.yml +++ b/.github/workflows/legacy_local_example.yml @@ -15,30 +15,44 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Check out code uses: actions/checkout@v2 - name: Get dependencies run: | if [ ${{matrix.os}} = "ubuntu-latest" ]; then + + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Install mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + # Install everything else we need, and configure + sudo apt-get 
install -y make unzip g++ etcd curl git wget eatmydata sudo service mysql stop sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" elif [ ${{matrix.os}} = "macos-latest" ]; then brew install mysql@5.7 make unzip etcd curl git wget fi @@ -61,4 +75,4 @@ jobs: fi # Make sure that testing is entirely non-reliant on config mv config config-moved - eatmydata -- test/legacy_local_example.sh + eatmydata -- go run test.go -print-log -follow -retry=1 legacy_local_example diff --git a/.github/workflows/local_example.yml b/.github/workflows/local_example.yml index 5f899bd761f..e9e1430704a 100644 --- a/.github/workflows/local_example.yml +++ b/.github/workflows/local_example.yml @@ -15,30 +15,44 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
- - name: Check out code uses: actions/checkout@v2 - name: Get dependencies run: | if [ ${{matrix.os}} = "ubuntu-latest" ]; then + + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Install mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + # Install everything else we need, and configure + sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata sudo service mysql stop sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" elif [ ${{matrix.os}} = "macos-latest" ]; then brew install mysql@5.7 make unzip etcd curl git wget fi @@ -61,4 +75,4 @@ jobs: fi # Make sure that testing is entirely non-reliant on config mv config config-moved - eatmydata -- test/local_example.sh + eatmydata -- go run test.go -print-log -follow -retry=1 local_example diff --git a/.github/workflows/pr-labels.yml b/.github/workflows/pr-labels.yml new file mode 100644 index 00000000000..eef96ca36f2 --- /dev/null +++ 
b/.github/workflows/pr-labels.yml @@ -0,0 +1,46 @@ +name: pr-labels +on: + pull_request: + types: [opened, labeled, unlabeled, synchronize] +jobs: + analyze: + if: github.repository == 'vitessio/vitess' + name: analyze_pr_labels + runs-on: ubuntu-latest + steps: + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! + + - name: analyze labels + env: + PR_NUMBER: ${{ github.event.pull_request.number }} + run: | + if [ -z "$PR_NUMBER" ] ; then + exit 0 + fi + LABELS_JSON="/tmp/labels.json" + + # Get labels for this pull request + curl -s \ + -H 'authorization: Bearer ${{ secrets.GITHUB_TOKEN }}' \ + -H "Accept: application/vnd.github.v3+json" \ + -H "Content-type: application/json" \ + "https://api.github.com/repos/${GITHUB_REPOSITORY}/issues/${PR_NUMBER}/labels" \ + > "$LABELS_JSON" + + if ! cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Component:' ; then + echo "Expecting PR to have label 'Component: ...'" + exit 1 + fi + if ! 
cat ${LABELS_JSON} | jq -r '.[].name ' | grep -q 'Type:' ; then + echo "Expecting PR to have label 'Type: ...'" + exit 1 + fi + exit 0 diff --git a/.github/workflows/region_example.yml b/.github/workflows/region_example.yml index ba09878b1f0..a50784718d0 100644 --- a/.github/workflows/region_example.yml +++ b/.github/workflows/region_example.yml @@ -15,30 +15,44 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range - # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 - - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file - run: | - echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts - # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! - - name: Check out code uses: actions/checkout@v2 - name: Get dependencies run: | if [ ${{matrix.os}} = "ubuntu-latest" ]; then + + sudo DEBIAN_FRONTEND="noninteractive" apt-get update + + # Uninstall any previously installed MySQL first + sudo systemctl stop apparmor + sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common + sudo apt-get -y autoremove + sudo apt-get -y autoclean + sudo deluser mysql + sudo rm -rf /var/lib/mysql + sudo rm -rf /etc/mysql + + # Install mysql80 + wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.20-1_all.deb + echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections + sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config* sudo apt-get update - sudo apt-get install -y mysql-server mysql-client make unzip g++ etcd curl git wget eatmydata + sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client + + # Install everything else we need, and configure + sudo apt-get 
install -y make unzip g++ etcd curl git wget eatmydata sudo service mysql stop sudo service etcd stop + sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263 sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/ - sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld + sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile" elif [ ${{matrix.os}} = "macos-latest" ]; then brew install mysql@5.7 make unzip etcd curl git wget fi @@ -62,4 +76,4 @@ jobs: # Make sure that testing is entirely non-reliant on config mv config config-moved sed -i 's/user\/my-vitess/runner\/work\/vitess\/vitess/g' examples/region_sharding/main_vschema_sharded.json #set correct path to countries.json - eatmydata -- test/region_example.sh + eatmydata -- go run test.go -print-log -follow -retry=1 region_example diff --git a/.github/workflows/sonar_analysis.yml b/.github/workflows/sonar_analysis.yml index de80804b174..cf2f380c271 100644 --- a/.github/workflows/sonar_analysis.yml +++ b/.github/workflows/sonar_analysis.yml @@ -6,7 +6,7 @@ on: jobs: build: - runs-on: ubuntu-18.04 + runs-on: ubuntu-latest steps: - name: Set up Go @@ -14,6 +14,16 @@ jobs: with: go-version: 1.15 + - name: Tune the OS + run: | + echo '1024 65535' | sudo tee -a /proc/sys/net/ipv4/ip_local_port_range + + # TEMPORARY WHILE GITHUB FIXES THIS https://github.com/actions/virtual-environments/issues/3185 + - name: Add the current IP address, long hostname and short hostname record to /etc/hosts file + run: | + echo -e "$(ip addr show eth0 | grep "inet\b" | awk '{print $2}' | cut -d/ -f1)\t$(hostname -f) $(hostname -s)" | sudo tee -a /etc/hosts + # DON'T FORGET TO REMOVE CODE ABOVE WHEN ISSUE IS ADRESSED! 
+ - name: Check out code uses: actions/checkout@v2 diff --git a/.github/workflows/unit_race.yml b/.github/workflows/unit_race.yml index b362b2b714e..cdae0987251 100644 --- a/.github/workflows/unit_race.yml +++ b/.github/workflows/unit_race.yml @@ -10,7 +10,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/unit_test_mariadb101.yml b/.github/workflows/unit_test_mariadb101.yml index c6120a09303..ee0fcd82973 100644 --- a/.github/workflows/unit_test_mariadb101.yml +++ b/.github/workflows/unit_test_mariadb101.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/unit_test_mariadb102.yml b/.github/workflows/unit_test_mariadb102.yml index 17ebdba01a5..f496b0072d0 100644 --- a/.github/workflows/unit_test_mariadb102.yml +++ b/.github/workflows/unit_test_mariadb102.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/unit_test_mariadb103.yml b/.github/workflows/unit_test_mariadb103.yml index f30d035eaa9..0011884a513 100644 --- a/.github/workflows/unit_test_mariadb103.yml +++ b/.github/workflows/unit_test_mariadb103.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/unit_test_mysql57.yml b/.github/workflows/unit_test_mysql57.yml index 3c7e077ad32..38d706e95c7 100644 --- a/.github/workflows/unit_test_mysql57.yml +++ b/.github/workflows/unit_test_mysql57.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/unit_test_mysql80.yml b/.github/workflows/unit_test_mysql80.yml index 
80f224e77c2..6fa7eca1161 100644 --- a/.github/workflows/unit_test_mysql80.yml +++ b/.github/workflows/unit_test_mysql80.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/.github/workflows/unit_test_percona56.yml b/.github/workflows/unit_test_percona56.yml index 72bc5a24d61..847de3f04d9 100644 --- a/.github/workflows/unit_test_percona56.yml +++ b/.github/workflows/unit_test_percona56.yml @@ -11,7 +11,7 @@ jobs: - name: Set up Go uses: actions/setup-go@v1 with: - go-version: 1.15 + go-version: 1.16 - name: Tune the OS run: | diff --git a/MAINTAINERS.md b/MAINTAINERS.md index 125cd85560f..d6454ba04af 100644 --- a/MAINTAINERS.md +++ b/MAINTAINERS.md @@ -5,7 +5,7 @@ The following is the full list, alphabetically ordered. * Alkin Tezuysal ([askdba](https://github.com/askdba)) alkin@planetscale.com * Andres Taylor ([systay](https://github.com/systay)) andres@planetscale.com * Andrew Mason ([amason](https://github.com/ajm188)) amason@slack-corp.com -* Anthony Yeh ([enisoc](https://github.com/enisoc)) enisoc@planetscale.com +* Anthony Yeh ([enisoc](https://github.com/enisoc)) enisoc@enisoc.dev * Dan Kozlowski ([dkhenry](https://github.com/dkhenry)) dan.kozlowski@gmail.com * David Weitzman ([dweitzman](https://github.com/dweitzman)) dweitzman@pinterest.com * Deepthi Sigireddi ([deepthi](https://github.com/deepthi)) deepthi@planetscale.com @@ -19,6 +19,7 @@ The following is the full list, alphabetically ordered. * Rohit Nayak ([rohit-nayak-ps](https://github.com/rohit-nayak-ps)) rohit@planetscale.com * Shlomi Noach ([shlomi-noach](https://github.com/shlomi-noach)) shlomi@planetscale.com * Sugu Sougoumarane ([sougou](https://github.com/sougou)) sougou@planetscale.com +* Vicent Marti ([vmg](https://github.com/vmg)) vmg@planetscale.com ## Areas of expertise @@ -26,7 +27,7 @@ The following is the full list, alphabetically ordered. 
sougou, demmer, rafael, dweitzman, tirsen, askdba, enisoc ### Builds -dkhenry, shlomi-noach, ajm188 +dkhenry, shlomi-noach, ajm188, vmg ### Resharding sougou, rafael, tirsen, dweitzman, systay, rohit-nayak-ps @@ -34,6 +35,9 @@ sougou, rafael, tirsen, dweitzman, systay, rohit-nayak-ps ### Parser sougou, dweitzman, deepthi, systay, harshit-gangal +### Performance +vmg + ### Cluster Management deepthi, rafael, enisoc, shlomi-noach, ajm188 diff --git a/Makefile b/Makefile index c58a70223ec..94ecd5f1772 100644 --- a/Makefile +++ b/Makefile @@ -76,6 +76,22 @@ endif # build vtorc with CGO, because it depends on sqlite CGO_ENABLED=1 go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" ./go/cmd/vtorc/... +# cross-build can be used to cross-compile Vitess client binaries +# Outside of select client binaries (namely vtctlclient & vtexplain), cross-compiled Vitess Binaries are not recommended for production deployments +# Usage: GOOS=darwin GOARCH=amd64 make cross-build +cross-build: +ifndef NOBANNER + echo $$(date): Building source tree +endif + bash ./build.env + # In order to cross-compile, go install requires GOBIN to be unset + export GOBIN="" + # For the specified GOOS + GOARCH, build all the binaries by default with CGO disabled + CGO_ENABLED=0 GOOS=${GOOS} GOARCH=${GOARCH} go install $(EXTRA_BUILD_FLAGS) $(VT_GO_PARALLEL) -ldflags "$(shell tools/build_version_flags.sh)" ./go/... 
+ # unset GOOS and embed local resources in the vttablet executable + (cd go/cmd/vttablet && unset GOOS && go run github.com/GeertJohan/go.rice/rice --verbose append --exec=$${HOME}/go/bin/${GOOS}_${GOARCH}/vttablet) + # Cross-compiling w/ cgo isn't trivial and we don't need vtorc, so we can skip building it + debug: ifndef NOBANNER echo $$(date): Building source tree @@ -117,7 +133,7 @@ grpcvtctldclient: go/vt/proto/vtctlservice/vtctlservice.pb.go parser: make -C go/vt/sqlparser -codegen: asthelpergen sizegen parser +codegen: asthelpergen sizegen parser astfmtgen visitor: asthelpergen echo "make visitor has been replaced by make asthelpergen" @@ -195,7 +211,9 @@ java_test: VTROOT=${PWD} mvn -f java/pom.xml -B clean verify install_protoc-gen-go: - go install github.com/gogo/protobuf/protoc-gen-gofast + go install google.golang.org/protobuf/cmd/protoc-gen-go + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc + go install github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto PROTO_SRCS = $(wildcard proto/*.proto) PROTO_SRC_NAMES = $(basename $(notdir $(PROTO_SRCS))) @@ -210,9 +228,14 @@ endif $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto for name in $(PROTO_SRC_NAMES); do \ - $(VTROOT)/bin/protoc --gofast_out=plugins=grpc:. --plugin protoc-gen-gofast="${GOBIN}/protoc-gen-gofast" \ - -I${PWD}/dist/vt-protoc-3.6.1/include:proto proto/$${name}.proto && \ - goimports -w vitess.io/vitess/go/vt/proto/$${name}/$${name}.pb.go; \ + $(VTROOT)/bin/protoc \ + --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ + --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ + --go-vtproto_out=. 
--plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ + --go-vtproto_opt=features=marshal+unmarshal+size+pool \ + --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/query.Row \ + --go-vtproto_opt=pool=vitess.io/vitess/go/vt/proto/binlogdata.VStreamRowsResponse \ + -I${PWD}/dist/vt-protoc-3.6.1/include:proto proto/$${name}.proto; \ done cp -Rf vitess.io/vitess/go/vt/proto/* go/vt/proto rm -rf vitess.io/vitess/go/vt/proto/ @@ -223,7 +246,7 @@ $(PROTO_GO_OUTS): minimaltools install_protoc-gen-go proto/*.proto # This rule builds the bootstrap images for all flavors. DOCKER_IMAGES_FOR_TEST = mariadb mariadb103 mysql56 mysql57 mysql80 percona percona57 percona80 DOCKER_IMAGES = common $(DOCKER_IMAGES_FOR_TEST) -BOOTSTRAP_VERSION=1 +BOOTSTRAP_VERSION=2 ensure_bootstrap_version: find docker/ -type f -exec sed -i "s/^\(ARG bootstrap_version\)=.*/\1=${BOOTSTRAP_VERSION}/" {} \; sed -i 's/\(^.*flag.String(\"bootstrap-version\",\) *\"[^\"]\+\"/\1 \"${BOOTSTRAP_VERSION}\"/' test.go @@ -330,6 +353,7 @@ endif git add --all git commit -n -s -m "Release commit for $(RELEASE_VERSION)" git tag -m Version\ $(RELEASE_VERSION) v$(RELEASE_VERSION) + git tag -a v$(GODOC_RELEASE_VERSION) -m "Tagging $(RELEASE_VERSION) also as $(GODOC_RELEASE_VERSION) for godoc/go modules" cd java && mvn versions:set -DnewVersion=$(DEV_VERSION) echo package servenv > go/vt/servenv/version.go echo >> go/vt/servenv/version.go @@ -337,8 +361,8 @@ endif git add --all git commit -n -s -m "Back to dev mode" echo "Release preparations successful" - echo "A git tag was created, you can push it with:" - echo " git push upstream v$(RELEASE_VERSION)" + echo "Two git tags were created, you can push them with:" + echo " git push upstream v$(RELEASE_VERSION) && git push upstream v$(GODOC_RELEASE_VERSION)" echo "The git branch has also been updated. 
You need to push it and get it merged" tools: @@ -440,3 +464,6 @@ vtadmin_web_proto_types: vtadmin_web_install # is changed by adding a new test to an existing shard. Any new or modified files need to be committed into git generate_ci_workflows: cd test && go run ci_workflow_gen.go && cd .. + +release-notes: + go run ./go/tools/release-notes -from $(FROM) -to $(TO) diff --git a/bootstrap.sh b/bootstrap.sh index 0c459e51e44..c15e43d383f 100755 --- a/bootstrap.sh +++ b/bootstrap.sh @@ -276,7 +276,7 @@ install_all() { # chromedriver if [ "$BUILD_CHROME" == 1 ] ; then - install_dep "chromedriver" "83.0.4103.14" "$VTROOT/dist/chromedriver" install_chromedriver + install_dep "chromedriver" "90.0.4430.24" "$VTROOT/dist/chromedriver" install_chromedriver fi echo diff --git a/build.env b/build.env index ef632fa1a5a..0c7bab5e5ec 100755 --- a/build.env +++ b/build.env @@ -17,7 +17,7 @@ source ./tools/shell_functions.inc go version >/dev/null 2>&1 || fail "Go is not installed or is not in \$PATH. See https://vitess.io/contributing/build-from-source for install instructions." -goversion_min 1.15 || fail "Go version reported: `go version`. Version 1.15+ required. See https://vitess.io/contributing/build-from-source for install instructions." +goversion_min 1.16 || fail "Go version reported: `go version`. Version 1.16+ required. See https://vitess.io/contributing/build-from-source for install instructions." 
mkdir -p dist mkdir -p bin diff --git a/config/mycnf/default.cnf b/config/mycnf/default.cnf index 8facbbe0343..06f2b5dd82e 100644 --- a/config/mycnf/default.cnf +++ b/config/mycnf/default.cnf @@ -10,6 +10,10 @@ relay-log-index = {{.RelayLogIndexPath}} pid-file = {{.PidFile}} port = {{.MysqlPort}} +{{if .SecureFilePriv}} +secure-file-priv = {{.SecureFilePriv}} +{{end}} + # all db instances should start in read-only mode - once the db is started and # fully functional, we'll push it into read-write mode read-only diff --git a/doc/VIT-02-report-fuzzing-audit.pdf b/doc/VIT-02-report-fuzzing-audit.pdf new file mode 100644 index 00000000000..8d289cc818c Binary files /dev/null and b/doc/VIT-02-report-fuzzing-audit.pdf differ diff --git a/doc/releasenotes/10_0_0_release_notes.md b/doc/releasenotes/10_0_0_release_notes.md index ace6e57d215..025c57d2512 100644 --- a/doc/releasenotes/10_0_0_release_notes.md +++ b/doc/releasenotes/10_0_0_release_notes.md @@ -1,7 +1,7 @@ This release complies with VEP-3 which removes the upgrade order requirement. Components can be upgraded in any order. It is recommended that the upgrade order should still be followed if possible, except to canary test the new version of VTGate before upgrading the rest of the components. ## Known Issues -* Running binaries with `--version` or calling @@version from a MySQL client still shows `10.0.0-RC1` (Note: fixed in v10.0.1) +* Running binaries with `--version` or running `select @@version` from a MySQL client still shows `10.0.0-RC1` * Online DDL [cannot be used](https://github.com/vitessio/vitess/pull/7873#issuecomment-822798180) if you are using the keyspace filtering feature of VTGate * VReplication errors when a fixed-length binary column is used as the sharding key #8080 @@ -83,6 +83,13 @@ This release complies with VEP-3 which removes the upgrade order requirement. 
Co * VTGate: Show columns query on system schema #7729 * VTGate: Disallow foreign key constraint on ddl #7780 * VTGate: VTGate: support -enable_online_ddl flag #7694 +* VTGate: Default to false for system settings to be changed per session at the database connection level #7921 +* VTGate: vtctl: return error on invalid ddl_strategy #7924 +* VTGate: [10.0] Squashed backport of #7903 #7927 +* VTGate: [10.0] Fix bug with reserved connections to stale tablets #7935 +* VTGate: [10.0] Fix for keyspaces_to_watch regression #7936 +* VTGate: [10.0] Update healthy tablets correctly for primary down #7937 +* VTGate: [10.0] Allow modification of tablet unhealthy_threshold via debugEnv #7938 ### Testing * Fuzzing: Add vtctl fuzzer #7605 @@ -161,6 +168,7 @@ This release complies with VEP-3 which removes the upgrade order requirement. Co * VReplication: Error out if binlog compression is turned on #7670 * VReplication: Tablet throttler: support for custom query & threshold #7541 * VStream API: allow aligning streams from different shards to minimize skews across the streams #7626 +* VReplication: Backport 7809: Update rowlog for the API change made for the vstream skew alignment feature #7890 ### OnlineDDL @@ -175,6 +183,8 @@ This release complies with VEP-3 which removes the upgrade order requirement. Co ### VTAdmin +Vitess 10.0 introduces a highly-experimental multi-cluster admin API and web UI, called VTAdmin. Deploying the vtadmin-api and vtadmin-web components is completely opt-in. If you're interested in trying it out and providing early feedback, come find us in #feat-vtadmin in the Vitess slack. Note that VTAdmin relies on the new VtctldServer API, so you must be running the new grpc-vtctld service on your vtctlds in order to use it. 
+ * VTAdmin: Add vtadmin-web build flag for configuring fetch credentials #7414 * VTAdmin: Add `cluster` field to vtadmin-api's /api/gates response #7425 * VTAdmin: Add /api/clusters endpoint to vtadmin-api #7426 @@ -201,7 +211,7 @@ This release complies with VEP-3 which removes the upgrade order requirement. Co * VTAdmin: [vtadmin] GetWorkflow(s) endpoints #7662 * VTAdmin: [vitessdriver|vtadmin] Support Ping in vitessdriver, use in vtadmin to healthcheck connections during Dial #7709 * VTAdmin: [vtadmin] Add to local example #7699 -* VTAdmin: vtexplain lock #7724 +* VTAdmin: [vtexplain] lock #7724 * VTAdmin: [vtadmin] Aggregate schema sizes #7751 * VTAdmin: [vtadmin-web] Add comments + 'options' parameter to API hooks #7754 * VTAdmin: [vtadmin-web] Add common max-width to infrastructure table views #7760 @@ -248,6 +258,9 @@ This release complies with VEP-3 which removes the upgrade order requirement. Co * Add vtorc binary for rpm,deb builds #7750 * Fixes bug that prevents creation of logs directory #7761 * [Java] Guava update to 31.1.1 #7764 +* make: build vitess as static binaries by default #7795 ← Potentially breaking change +* make: build vitess as static binaries by default (10.0 backport) #7808 +* java: prepare java version for release 10.0 #7922 ## Functionality Neutral Changes * VTGate: Remove unused key.Destination.IsUnique() #7565 @@ -261,3 +274,12 @@ This release complies with VEP-3 which removes the upgrade order requirement. Co * Fix unit test fail after merge #7550 * Add test with NULL input values for vindexes that did not have any. #7552 + +## VtctldServer +As part of an ongoing effort to transition from the VtctlServer gRPC API to the newer VtctldServer gRPC API, we have updated the local example to use the corresponding new vtctldclient to perform InitShardPrimary (formerly, InitShardMaster) operations. + +To enable the new VtctldServer in your vtctld components, update the -service_map flag to include grpc-vtctld. 
You may specify both grpc-vtctl,grpc-vtctld to gracefully transition. + +The migration is still underway, but you may begin to transition to the new client for migrated commands. For a full listing, refer either to proto/vtctlservice.proto or consult vtctldclient --help. + + diff --git a/doc/releasenotes/11_0_0_release_notes.md b/doc/releasenotes/11_0_0_release_notes.md new file mode 100644 index 00000000000..9b2d1458446 --- /dev/null +++ b/doc/releasenotes/11_0_0_release_notes.md @@ -0,0 +1,401 @@ +This release complies with VEP-3 which removes the upgrade order requirement. Components can be upgraded in any order. It is recommended that the upgrade order should still be followed if possible, except to canary test the new version of VTGate before upgrading the rest of the components. + +## Known Issues + +- A critical vulnerability [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228) in the Apache Log4j logging library was disclosed on Dec 9 2021. + The project provided release `2.15.0` with a patch that mitigates the impact of this CVE. It was quickly found that the initial patch was insufficient, and additional CVEs + [CVE-2021-45046](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-45046) and [CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832) followed. + These have been fixed in release `2.17.1`. This release of Vitess, `v11.0.0`, uses a version of Log4j below `2.17.1`, for this reason, we encourage you to use version `v11.0.4` instead, to benefit from the vulnerability patches. + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. 
+ +## Bug fixes +### Build/CI + * update moby/term to fix darwin build issue #7787 + * Removing Linux/amd64 specific subdependencies #7796 + * CI: run some tests inside docker to workaround GH Actions issue #7868 + * Fix flaky race condition in vtexplain #7930 + * Fixing flaky endtoend/tablegc tests #7947 + * Add libwww-perl to allow pt-osc wrapper to work in vitess/lite #8141 +### Cluster management + * Add missing return on a failed `GetShard` call in `FindAllShardsInKeyspace` #7992 + * Increase the default srv_topo_timeout #8011 + * parser: support index name in FOREIGN KEY clause; Online DDL to reject FK clauses #8058 + * Correctly parse cell names with dashes in tablet aliases #8167 + * /healthz should not report ok when vttablet is not connected to mysql #8238 + * [topo] Refactor `ExpandCells` to not error on valid aliases #8291 + * tm state: don't populate metadata in updateLocked #8362 + * [grpcvtctldserver] Fix backup detail limit math #8402 +### Query Serving + * fix select star, col1 orderby col1 bug. #7743 + * VTExplain: Add support for multi table join query #7829 + * Routing of Information Schema Queries #7841 + * Fix + integration test for `keyspaces_to_watch` routing regression [fixes #7882] #7873 + * Revert "[tablet, queryrules] Extend query rules to check comments" #7897 + * vtctl: return error on invalid ddl_strategy #7923 + * Panic when EOF after @ symbol #7925 + * Fixes encoding of sql strings #8033 + * Make TwoPC engine initialization async. 
#8048 + * Fix for issue with information_schema queries with both table name and schema name predicates #8087 + * Fix for transactions not allowed to finish during PlannedReparentShard #8089 + * Set fully parsed to false when ignoring errors #8094 + * Fix for query and sub query with limits #8097 + * Fix buffering when using reserved connections #8102 + * Parse generated columns in DDL #8117 + * More explicit message on online DDL parsing error #8118 + * healthcheck: attempt to update primary only if the current tablet is serving #8121 + * PRIMARY in index hint list for master #8160 + * Signed int parse #8189 + * Delete table reference alias support #8393 + * Fix for function calls in DEFAULT value of CREATE TABLE statement in release-11.0 #8476 + * Backport: Fixing multiple issues related to onlineddl/lifecycle #8517 + * boolean values should not be parenthesised in default clause - release 11 #8531 + + +### VReplication + * VDiff: Use byte compare if weight_string() returns null for either source or target #7696 + * Rowlog: Update rowlog for the API change made for the vstream skew alignment feature #7809 + * Pad binlog values for binary() columns to match the value returned by mysql selects #7969 + * Change vreplication error metric name to start with a string to pass prometheus validations #7983 + * Pass the provided keyspace through to `UpdateDisableQueryService` rather than hard-coding the sourceKeyspace #8020 + * Fix vreplication timing metrics #8024 + * Switchwrites: error if no tablets available on target for reverse replication #8142 + * Remove noisy vexec logs #8144 + * VReplicationExec: don't create new stats objects on a select #8166 + * Binlog JSON Parser: handle inline types correctly for large documents #8187 + * Schema Tracking Flaky Test: Ignore unrelated gtid progress events #8283 + * Adds padding to keyrange comparison #8296 + * VReplication Reverse Workflows: add keyspace scope to vindex while creating reverse vreplication streams #8385 + * 
OnlineDDL/Vreplication stress test: investigating failures #8390 + * Ignore SBR statements from pt-table-checksum #8396 + * Return from throttler goroutine if context is cancelled to prevent goroutine leaks #8489 + * VDiff: Add BIT datatype to list of byte comparable types #8401 + * Fix excessive VReplication logging to file and db #8521 + +### VTAdmin + * Add missing return in `vtctld-*` DSN case, and log any flag that gets ignored #7872 + * [vtadmin-web] Do not parse numbers/booleans in URL query parameters by default #8100 + * [vtadmin-web] Small bugfix where stream source displayed instead of target #8311 +## CI/Build +### Build/CI + * Planbuilder: Fix fuzzer #7952 + * java: Bump SNAPSHOT version to 11.0.0-SNAPSHOT after Vitess release v10 #7968 + * Add optional TLS feature to gRPC servers #8049 + * Add image and base image arguments to build.sh #8064 + * [fuzzing] Add report #8128 + * CI: fail PR if it does not have required labels #8147 + * trigger pr-labels workflow when labels added/removed from PR #8157 + * Add vmg as a maintainer #8186 + * Refactor vtorc endtoend tests #8215 + * Use shared testing helper which guards against races in tmclient construction #8300 + * moved multiple vtgate and tabletgateway to individual shards #8305 + * include squashed PRs in release notes #8326 + * pr-labels workflow: only check actual PRs #8327 + * Heuristic fix for a flaky test: allow for some noise to pass through #8339 + * ci: upgrade to Go 1.16 #8274 + * mod: upgrade etcd to stable version #8357 + * Resolve go.mod issue with direct dependency on planetscale/tengo #8383 + * Initial GitHub Docker Build Setup #8399 + +### Cluster management + * make: build vitess as static binaries by default #7795 + * Fix TableGC flaky test by reducing check interval #8270 +### Java + * Bump commons-io from 2.6 to 2.7 in /java #7953 +### Other + * Bump MySQL version for docker builds to 8.0.23 #7811 +### Query Serving + * Online DDL Vreplication test suite: adding tests #8213 + * 
Online DDL/VReplication: more passing tests (no UK/PK) #8225 + * Online DDL: reject ALTER TABLE...RENAME statements #8227 + * Online DDL/VReplication: fail on existence of FOREIGN KEY #8228 + * Online DDL/VReplication: test PK column case change #8336 + * Online DDL/VReplication: add PK DATETIME->TIMESTAMP test #8338 + * Bugfix: assign cexpr.references for column CONVERTed to utf8mb4 #8355 + + +### VReplication + * Towards a VReplication/OnlineDDL testing suite #8181 + * Online DDL/Vreplication: column type awareness #8239 + * remove duplicate ReadMigrations, fixing build #8258 + * VReplication (and by product, Online DDL): support GENERATED column as part of PRIMARY KEY #8335 +### vttestserver + * docker/vttestserver: Set max_connections in default-fast.cnf at container build time #7810 +## Documentation +### Cluster management + * Enhance k8stopo flag documentation #8458 +### Build/CI + * Update version for latest snapshot #7801 + * v10 GA Release Notes #7964 + * Updates and Corrections to v9 Release Notes #8295 +### Query Serving + * Update link to 'Why FK not supported in Online DDL' blog post #8372 +### Other + * correct several wrong words #7822 + * MAINTAINERS.md: Update enisoc's email. #7909 + * Modification of the Markdown list format in the release notes template #7962 + * Update vreplicator docs #8014 + * Release notes: add Known Issues #8027 +## Enhancement +### Observability + * [trace] Add logging support to opentracing plugins #8289 + + +### Build/CI + * Makefile: add cross-build target for cross-compiling client binaries #7806 + * git: improve signoff detection #7852 + * Release notes generation #7932 + * vitess/lite docker build fails for mysql80 #7943 + * Fix a gofmt warning #7959 + * docker/vttestserver/run.sh: Add $CHARSET environment variable #7970 + * docker/lite/install_dependencies.sh: If the dependency install loop reaches its max number of retries, consider that a failure; exit the script nonzero so the build halts. 
#7976 + * Add commit count and authors to the release notes #7982 + * Automate version naming and release tagging #8034 + * Make sure to only allow codegen when the rest of the code compiles #8169 + * Add options to vttestserver to pass -foreign_key_mode, -enable_online_ddl, and -enable_direct_ddl through to vtcombo #8177 +### Cluster management + * Dynamic throttle metric threshold #7742 + * Bump Bootstrap version, per CVE-2018-14040 (orchestrator/vtorc) #7824 + * Check tablet alias before removing after error stream #7915 + * vttablet/tabletmanager - add additional test for tmstate.Open() #7993 + * Add `vttablet_restore_done` hook #8007 + * Added ValidateVSchema #8012 + * Add RemoteOperationTimeout to both legacy and grpc `ChangeTabletType` implementations. #8052 + * [vtctldserver] Add guard against self-reparent, plus misc updates #8084 + * [tm_state] updateLocked should re-populate local metadata tables to reflect promotion rule changes #8107 + * [vtctld] Migrate `ApplyVSchema` to `VtctldServer` #8113 + * vtctl.generateShardRanges -> key.GenerateShardRanges #8134 + * etcd: add grpc.WithBlock to client config #8205 + * [workflow] Add tracing to `GetWorkflows` endpoint #8266 + * [vtctldclient] Add legacy shim #8284 + * [vtctldserver] Add tracing #8285 + * [vtctldserver] Add additional backup info fields #8321 + * [workflow] Call `scanWorkflow` concurrently #8272 +### Query Serving + * [Gen4] Implemented table alias functionality in the semantic analysis #7629 + * Improved error messages (tabletserver) #7747 + * Allow modification of tablet unhealthy_threshold via debugEnv #7753 + * [Gen4] Initial Horizon Planning #7775 + * [tablet, queryrules] Extend query rules to check comments #7784 + * Add support for showing global gtid executed per shard #7856 + * Minor cleanups around errors on the vtgate #7864 + * log unsupported queries #7865 + * Show databases like #7912 + * Add rank as reserved keyword #7944 + * Online DDL: introducing ddl_strategy `-singleton-context` 
flag #7946 + * Ignore the error and log as warn if not able to validate the current system setting for check and ignore case #8004 + * Detect and signal schema changes on vttablets #8005 + * DDL bypass plan #8013 + * Online DDL: progress & ETA for Vreplication migrations #8015 + * Scatter errors as warning in olap query #8018 + * livequeryz: livequeryz/terminate link should be relative #8025 + * Handle online DDL user creation if we do not have SUPER privs (e.g. AWS Aurora) #8038 + * Update the schema copy with minimal changes #8067 + * Gen4: Support for order by column not present in projection #8070 + * Schema tracking in vtgate #8074 + * protobuf: upgrade #8075 + * added mysql 8.0 reserved keywords #8086 + * Fix ghost/pt-osc in the external DB case where MySQL might be reporting #8115 + * Support for vtgate -enable_direct_ddl flag #8116 + * add transaction ID to query log for acquisition and analysis #8133 + * Inline reference #8136 + * Improve ScatterErrorsAsWarnings functionality #8139 + * Gen4: planning Select IN #8155 + * Primary key name #8188 + * Online DDL: read and publish gh-ost FATAL message where possible #8192 + * Expose inner net.Conn to be able to write better unit tests #8217 + * vtgate: validate important flags during startup #8218 + * Online DDL/VReplication: AUTO_INCREMENT support and tests #8223 + * [Gen4] some renaming from v4 to Gen4 #8234 + * Schema Tracking: Optimize and Bug Fix #8243 + * gen4: minor refactoring to improve readability #8245 + * gen4: plan more opcodes #8254 + * Online DDL: report rows_copied for migration (initial support in gh-ost) #8255 + * Schema tracking: new tables in sharded keyspace #8256 + * [parser] use table_alias for ENGINE option in CREATE TABLE stmt #8307 + * Add /debug/env for vtgate #8292 + * gen4: outer joins #8312 + * Gen4: expand star in projection list #8325 + * gen4: Fail all queries not handled well by gen4 #8359 + * Gen4 fail more2 #8382 + * SHOW VITESS_MIGRATION '...' 
LOGS, retain logs for 24 hours #8532 + * [11.0] query serving to continue when topo server restarts #8533 + * [11.0] Disable allowing set statements on system settings by default #8540 +### VReplication + * Change local example to use v2 vreplication flows and make v2 flows as the default #8527 + * Use Dba user when Vexec is runAsAdmin #7731 + * Add table for logging stream errors, state changes and key workflow steps #7831 + * Fix some static check warning #7960 + * VSchema Validation on ReshardWorkflow Creation #7977 + * Tracking `rows_copied` #7980 + * Vdiff formatting improvements #8079 + * Ignore generated columns in workflows #8129 + * VReplication Copy Phase: Increase default replica lag tolerance. Also make it and copy timeout modifiable via flags #8130 + * Materialize: Add additional comparison operators in Materialize and fix bug where they not applied for sharded keyspaces #8247 + * Copy Phase: turn on OptimizeInserts by default #8248 + * Tracker/VStreamer: only reload schema for tables in current database and not for internal table artifacts #8257 + * Online DDL/Vreplication suite: support ENUM->VARCHAR/TEXT type change #8275 + * Added TableName to DiffReport struct. 
#8279 + * New VReplication lag metric #8306 + * VStream API: add heartbeat for idle streams #8244 + * Online DDL/VReplication: support non-UTF8 character sets #8322 + * Online DDL/Vreplication suite: fix test for no shared UK #8334 + * Online DDL/VReplication: support DROP+ADD column of same name #8337 + * Online DDL/VReplication test suite: support ENUM as part of PRIMARY KEY #8345 + * Change local example to use v2 vreplication flows #8527 + * Tablet Picker: add metric to record lack of available tablets #8403 +### VTAdmin + * [vtadmin-web] Add useSyncedURLParam hook to persist filter parameter in the URL #7857 + * [vtadmin-web] Display more data on /gates view and add filtering #7876 + * [vtadmin] Promote ErrNoSchema to a TypedError which returns http 404 #7885 + * [vtadmin] gate/tablet/vtctld FQDNs #7886 + * [vtadmin-web] Display vindex data on Schema view #7917 + * [vtadmin-web] Add filtering and source/target shards to Workflows view #7948 + * [vtadmin-web] Add filtering + shard counts/status to Keyspaces view #7991 + * [vtadmin-web] Display shard state on Tablets view + extract tablet utilities #7999 + * [vtadmin] experimental tabletdebug #8003 + * [vtadmin-web] Display timestamps + stream counts on Workflows view #8009 + * [vtadmin-web] Updates to table styling + other CSS #8072 + * [vtadmin-web] Add Tab components #8119 + * [vtadmin-api] Update GetTablet to use alias instead of hostname #8163 + * [vtadmin-api] Rename flag 'http-tablet-fqdn-tmpl' to 'http-tablet-url-tmpl' + update vtadmin flags for local example #8164 + * Add `--tracer` flag to vtadmin and actually start tracing #8165 + * [vtadmin-web] The hastiest Tablet view (+ experimental debug vars) #8170 + * [vtadmin-web] Add tabs to Workflow view #8203 + * [vtctld] Add GetSrvVSchemas command #8221 + * [vtadmin-api] Add HTTP endpoints for /api/srvvschemas and /api/srvvschema/{cluster}/{cell} #8226 + * [vtadmin-web] Add QPS and VReplicationQPS charts to Tablet view #8263 + * [vtadmin-web] Add 
client-side error handling interface + Bugsnag implementation #8287 + * [vtadmin-web] Add chart for stream vreplication lag across all streams in a workflow #8331 + * [vtctldproxy] Add more annotations to vtctld Dial calls #8346 + + +### vttestserver + * Vttest create db #7989 + * Adds an environment variable to set the MySQL max connections limit in vttestserver docker image #8210 +## Feature Request +### Build/CI + * [trace] Add optional flags to support flexible jaeger configs #8199 +### Cluster management + * Add VtctldServer to vtcombo #7896 + * [vtctldserver] Migrate routing rules RPCs, and also `RebuildVSchemaGraph` #8197 + * [vtctldserver] Migrate `CellInfo`, `CellAlias` rw RPCs #8219 + * [vtctldserver] Add RefreshState RPCs #8232 +### Query Serving + * VTGate grpc implementation of Prepare and CloseSession #8211 + * Schema tracking: One schema load at a time per keyspace #8224 +### VTAdmin + * [vtadmin-web] Add DataFilter + Workspace layout components #8032 + * [vtadmin-web] Add Tooltip + HelpTooltip components #8076 + * [vtadmin-web] Add initial Stream view, render streams on Workflow view #8091 + * [vtadmin-web] The hastiest-ever VTExplain UI #8092 + * [vtadmin-web] Add source-map-explorer util #8093 + * [vtadmin-web] Add Keyspace detail view #8111 + * [vtadmin-api] Add GetKeyspace endpoint #8125 + * [workflow] Add vreplication_log data to workflow protos, and `VtctldServer.GetWorkflows` method #8261 + * [vtadmin] Add debug endpoints #8268 +## Internal Cleanup +### Build/CI + * Makefile: fix cross-build comments #8246 + * remove unused hooks with refs to master branch #8250 + * [vtctldserver] Update tests to prevent races during tmclient init #8320 + * sks-keyservers.net seems to be finally dead, replace with #8363 + * endtoend: change log level of etcd start from error to info #8370 +### Cluster management + * Online DDL: code cleanup #7589 + * [wrangler|topotools] Migrate `UpdateShardRecords`, `RefreshTabletsByShard`, and `{Get,Save}RoutingRules` #7965 
+ * Tear down old stream_migrater shim, now that we're fully in `package workflow` #8073 + * [mysqlctl] Restructure `MetadataManager` to reduce public API surface area #8152 + * naming: master to primary #8251 + * vtorc: code cleanup #8269 +### Query Serving + * Plan StreamExecute Queries #7941 +### VReplication + * [workflow] extract migration targets from wrangler #7934 + * [wrangler|workflow] Extract vrStream type to workflow.VReplicationStream #7966 + * [wrangler|workflow] Extract `workflowState` and `workflowType` out to `package workflow` #7967 + * [wrangler|workflow] extract `*wrangler.streamMigrater` to `workflow.StreamMigrator` #8008 + * [workflow] Migrate `getCellsWith{Shard,Table}ReadsSwitched`, `TrafficSwitchDirection` and `TableRemovalType` to package workflow #8190 + * [workflow] Cleanup wrangler wrappers, migrate `checkIfJournalExistsOnTablet` to package workflow #8193 + * Backports of #8403 #8483 #8489 #8401 #8521 #8396 from main into release 11.0 #8536 +### VTAdmin + * [vtadmin-api] Replace magic numbers with `net/http` constants #8127 + * [vtadmin-web] Move single-entity view components into subfolders #8202 + * [vtadmin] Ensure we log any errors when closing the tracer #8262 +## Other +### Build/CI + * add vtctldclient to binary directory #7889 +### Cluster management + * vttablet/tabletmanager: add isInSRVKeyspace/isShardServing #7929 +### Other + * Change VitessInputFormat key type #199 + * Online DDL plan via Send; "singleton" migrations on tablets #7785 + * flaky onlineddl tests: reduce -online_ddl_check_interval #7847 + * Looking into flaky endtoend upgrade test #7900 + * fixing flaky upgrade test #7901 +### Query Serving + * Introduce Concatenated Fixed-width Composite or CFC vindex #7537 + * Add common tags for stats backends that support it #7651 + * Add support for showing global vgtid executed #7797 + * perf: vttablet/mysql optimizations #7800 + * Fix bug with reserved connections to stale tablets #7879 + * Memory Sort to close the 
goroutines when callback returns error #7903 + * OnlineDDL: more migration check ticks upon migration start #7961 + * Update gh-ost binary to v1.1.3 #8021 + * VReplication Online DDL: fix classification of virtual columns #8043 +### VReplication + * VReplicationErrors metric: use . as delimiter instead of _ to behave well with Prometheus #7807 +### VTAdmin + * Update `GetSchema` filtering to exclude shards where `IsMasterServing` but no `MasterAlias` #7805 + * [vtadmin-api] Reintroduce include_non_serving_shards opt to GetSchema #7814 + * [vtadmin-web] Add DataCell component #7817 + * Rewrite useTableDefinitions hook as getTableDefinitions util #7821 + * [vtadmin-web] Display (approximate) table sizes + row count on /schemas view #7826 + * [vtadmin-web] Add Pip + TabletServingPip components #7827 +## Performance +### Query Serving + * vttablet: stream consolidation #7752 + * perf: optimize bind var generation #7828 +### VReplication + * Optimize the catchup phases by filtering out rows which not within range of copied pks during inserts #7708 + * Ability to compress gtid when stored in _vt.vreplication's pos column #7877 + * Performance & benchmarks (table copying) #7881 + * Dynamic packet sizing #7933 + * perf: vreplication client CPU usage #7951 + * VDIff: fix performance regression introduced by progress logging #8016 + * proto: Generate faster code using vtprotobuf #8173 + * proto: enable pooling for vreplication #8273 +## Testing +### Build/CI + * Attempt to fix TLS Server Flaky test #7842 + * TestMakeCommonTags Flaky Test: match elements #7843 + * upgrade tests: test against v9.0.0 #7848 + * FOSSA scan added #7862 + * ci: fix all racy tests #7904 + * CI: Revert docker change for unit tests from #7868 #7940 + * Add online ddl on start queries to schema list in vtexplain tablet #8397 +### Java + * [Java] JDBC mysql driver test #8154 +### Other + * Fuzzing: Fixup oss-fuzz build script #7782 + * Wrangler tests: Return a fake tablet in the wrangler test dialer to 
avoid tablet picker errors spamming the test logs #7863 +### Query Serving + * clean up test #7816 + * tabletserver: fix flaky test #7851 + * Planbuilder: Add fuzzer #7902 + * mysql: Small adjustments to fuzzer #7907 + * vtgate/engine: Add fuzzer #7914 + * Adding Fuzzer Test Cases #8106 + * Addition of fuzzer issues #8195 +### VReplication + * vstreamer: Add fuzzer #7918 +### vttestserver + * Speedup new vttestserver tests #8229 + * Vttestserver docker test #8253 +### Cluster management + * Make timestamp authoritative for master information #8381 + + +The release includes 1080 commits (excluding merges) + +Thanks to all our contributors: @AdamKorcz, @GuptaManan100, @Hellcatlk, @Johnny-Three, @acharisshopify, @ajm188, @alexrs, @aquarapid, @askdba, @deepthi, @doeg, @dyv, @enisoc, @frouioui, @gedgar, @guidoiaquinti, @harshit-gangal, @hkdsun, @idvoretskyi, @jmoldow, @kirs, @mcronce, @narcsfz, @noxiouz, @rafael, @rohit-nayak-ps, @setassociative, @shlomi-noach, @systay, @tokikanno, @vmg, @wangmeng99, @yangxuanjia, @zhangshj-inspur diff --git a/doc/releasenotes/11_0_1_release_notes.md b/doc/releasenotes/11_0_1_release_notes.md new file mode 100644 index 00000000000..77c9b4acd1e --- /dev/null +++ b/doc/releasenotes/11_0_1_release_notes.md @@ -0,0 +1,27 @@ +## Known Issues + +- A critical vulnerability [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228) in the Apache Log4j logging library was disclosed on Dec 9 2021. + The project provided release `2.15.0` with a patch that mitigates the impact of this CVE. It was quickly found that the initial patch was insufficient, and additional CVEs + [CVE-2021-45046](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-45046) and [CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832) followed. + These have been fixed in release `2.17.1`. 
This release of Vitess, `v11.0.1`, uses a version of Log4j below `2.17.1`; for this reason, we encourage you to use version `v11.0.4` instead, to benefit from the vulnerability patches. + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. + + +## Bug fixes +### Cluster management + * Port #8422 to 11.0 branch #8744 +### Query Serving + * Handle subquery merging with references correctly #8661 + * onlineddl Executor: build schema with DBA user #8667 + * Backport to 11: Fixing a panic in vtgate with OLAP mode #8746 + * Backport into 11: default to primary tablet if not set in VStream api #8766 +### VReplication + * Refresh SrvVSchema after an ExternalizeVindex: was missing #8669 +## CI/Build +### Build/CI + * Vitess Release 11.0.0 #8549 + * Backport to 11: Updated Makefile do_release script to include godoc steps #8787 + +The release includes 18 commits (excluding merges) +Thanks to all our contributors: @aquarapid, @askdba, @frouioui, @harshit-gangal, @rohit-nayak-ps, @shlomi-noach, @systay diff --git a/doc/releasenotes/11_0_2_release_notes.md b/doc/releasenotes/11_0_2_release_notes.md new file mode 100644 index 00000000000..cc079f27566 --- /dev/null +++ b/doc/releasenotes/11_0_2_release_notes.md @@ -0,0 +1,31 @@ +# Release of Vitess v11.0.2 +## Announcement + +This patch provides an update regarding the Apache Log4j security vulnerability (CVE-2021-44228) (#9364), along with a few bug fixes. + +## Known Issues + +- A critical vulnerability [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228) in the Apache Log4j logging library was disclosed on Dec 9 2021. + The project provided release `2.15.0` with a patch that mitigates the impact of this CVE.
It was quickly found that the initial patch was insufficient, and additional CVEs + [CVE-2021-45046](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-45046) and [CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832) followed. + These have been fixed in release `2.17.1`. This release of Vitess, `v11.0.2`, uses a version of Log4j below `2.17.1`, for this reason, we encourage you to use version `v11.0.4` instead, to benefit from the vulnerability patches. + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. + +------------ +## Changelog + +### Bug fixes +#### VReplication +* Fix how we identify MySQL generated columns #8796 +### CI/Build +#### Build/CI +* CI: ubuntu-latest now has MySQL 8.0.26, let us override it with latest 8.0.x #9374 +### Internal Cleanup +#### Java +* build(deps): bump log4j-api from 2.13.3 to 2.15.0 in /java #9364 + + +The release includes 7 commits (excluding merges) + +Thanks to all our contributors: @askdba, @deepthi, @systay, @tokikanno \ No newline at end of file diff --git a/doc/releasenotes/11_0_2_summary.md b/doc/releasenotes/11_0_2_summary.md new file mode 100644 index 00000000000..248b19c7d18 --- /dev/null +++ b/doc/releasenotes/11_0_2_summary.md @@ -0,0 +1,12 @@ +## Major Changes + +This patch is providing an update regarding the Apache Log4j security vulnerability (CVE-2021-44228) (#9364), along with a few bug fixes. + +## Known Issues + +- A critical vulnerability [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228) in the Apache Log4j logging library was disclosed on Dec 9 2021. + The project provided release `2.15.0` with a patch that mitigates the impact of this CVE. 
It was quickly found that the initial patch was insufficient, and additional CVEs + [CVE-2021-45046](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-45046) and [CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832) followed. + These have been fixed in release `2.17.1`. This release of Vitess, `v11.0.2`, uses a version of Log4j below `2.17.1`, for this reason, we encourage you to use version `v11.0.4` instead, to benefit from the vulnerability patches. + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. diff --git a/doc/releasenotes/11_0_3_release_notes.md b/doc/releasenotes/11_0_3_release_notes.md new file mode 100644 index 00000000000..96f74b2987e --- /dev/null +++ b/doc/releasenotes/11_0_3_release_notes.md @@ -0,0 +1,27 @@ +# Release of Vitess v11.0.3 +## Announcement + +This patch is providing an update regarding the Apache Log4j security vulnerability (CVE-2021-45046) (#9395). + +## Known Issues + +- A critical vulnerability [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228) in the Apache Log4j logging library was disclosed on Dec 9 2021. + The project provided release `2.15.0` with a patch that mitigates the impact of this CVE. It was quickly found that the initial patch was insufficient, and additional CVEs + [CVE-2021-45046](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-45046) and [CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832) followed. + These have been fixed in release `2.17.1`. This release of Vitess, `v11.0.3`, uses a version of Log4j below `2.17.1`, for this reason, we encourage you to use version `v11.0.4` instead, to benefit from the vulnerability patches. 
+ +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. + +------------ +## Changelog + +### Dependabot +#### Java +* build(deps): bump log4j-core from 2.15.0 to 2.16.0 in /java #9395 +### Documentation +#### Examples +* change operator example to use v11.0.3 docker images #9403 + + +The release includes 3 commits (excluding merges) +Thanks to all our contributors: @frouioui \ No newline at end of file diff --git a/doc/releasenotes/11_0_3_summary.md b/doc/releasenotes/11_0_3_summary.md new file mode 100644 index 00000000000..af73528db91 --- /dev/null +++ b/doc/releasenotes/11_0_3_summary.md @@ -0,0 +1,12 @@ +## Major Changes + +This patch is providing an update regarding the Apache Log4j security vulnerability (CVE-2021-45046) (#9395). + +## Known Issues + +- A critical vulnerability [CVE-2021-44228](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228) in the Apache Log4j logging library was disclosed on Dec 9 2021. + The project provided release `2.15.0` with a patch that mitigates the impact of this CVE. It was quickly found that the initial patch was insufficient, and additional CVEs + [CVE-2021-45046](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-45046) and [CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832) followed. + These have been fixed in release `2.17.1`. This release of Vitess, `v11.0.3`, uses a version of Log4j below `2.17.1`, for this reason, we encourage you to use version `v11.0.4` instead, to benefit from the vulnerability patches. + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. 
diff --git a/doc/releasenotes/11_0_4_release_notes.md b/doc/releasenotes/11_0_4_release_notes.md new file mode 100644 index 00000000000..90222c96a66 --- /dev/null +++ b/doc/releasenotes/11_0_4_release_notes.md @@ -0,0 +1,19 @@ +# Release of Vitess v11.0.4 +## Announcement + +This patch provides an update regarding the Apache Log4j security vulnerability ([CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832)) (#9464). + +## Known Issues + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174. + +------------ +## Changelog + +### Dependabot +#### Java +* build(deps): bump log4j-api from 2.16.0 to 2.17.1 in /java #9464 + +The release includes 6 commits (excluding merges) + +Thanks to all our contributors: @dbussink, @frouioui \ No newline at end of file diff --git a/doc/releasenotes/11_0_4_summary.md b/doc/releasenotes/11_0_4_summary.md new file mode 100644 index 00000000000..3ed0c070782 --- /dev/null +++ b/doc/releasenotes/11_0_4_summary.md @@ -0,0 +1,7 @@ +## Announcement + +This patch provides an update regarding the Apache Log4j security vulnerability ([CVE-2021-44832](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44832)) (#9464). + +## Known Issues + +- An issue where the value of the `-force` flag is used instead of `-keep_data` flag's value in v2 vreplication workflows (#9174) is known to be present in this release. A workaround is available in the description of issue #9174.
diff --git a/doc/releasenotes/9_0_0_release_notes.md b/doc/releasenotes/9_0_0_release_notes.md index 43c5510b845..6dc076df1ce 100644 --- a/doc/releasenotes/9_0_0_release_notes.md +++ b/doc/releasenotes/9_0_0_release_notes.md @@ -21,6 +21,7 @@ Vitess 9.0 is not compatible with the previous release of the Vitess Kubernetes * Bug fix regression in /healthz #7090 * Fix metadata related operation hangs when zk down #7228 * Fix accidentally-broken legacy vtctl output format #7285 +* Healthcheck: use isIncluded correctly to fix replica/rdonly routing bug #6904 ## Functionality Added or Changed @@ -68,26 +69,34 @@ Vitess 9.0 is not compatible with the previous release of the Vitess Kubernetes * VTGate: Cache only dml and select plans #7196 * VTGate: Planning and Parsing Support for Alter Table #7199 * VTGate: Add FindAllShardsInKeyspace to vtctldserver #7201 +* VTGate: Initial implementation of vtctld service #7128 * VTGate: improve-log: FAILED_PRECONDITION #7215 -* VTGate: Planner refactoring #7103 -* VTGate: Migrate `vtctlclient InitShardMaster` => `vtctldclient InitShardPrimary` #7220 -* VTGate: Add Planning and Parsing Support for Truncate, Rename, Drop Index and Flush #7242 -* VTGate: Fix create table format function to include if not exists #7250 -* VTGate: Added default databases when calling 'show databases' #7256 -* VTGate : Add Update.AddWhere to mirror Select.AddWhere #7277 -* VTGate :Rremoved resolver usage from StreamExecute #7281 -* VTGate: Adding a MySQL connection at Vtgate to run queries on it directly in case of testing mode #7291 -* VTGate: Added vitess_version as variable #7295 * VTGate: Default to false for system settings to be changed per session at the database connection level #7299 -* VTGate: Gen4: Add Limit clause support #7312 -* VTGate: Gen4: Handling subquery in query graph #7313 -* VTGate: Addition of @@enable_system_settings #7300 * VTGate: Route INFORMATION_SCHEMA queries #6932 * VTGate: Adds Planning and Parsing Support for Create Index of 
MySQL 5.7 #7024 * VTGate: Log sql which exceeds max memory rows #7055 +* VTGate: Enable Client Session Tracking feature in mysql protocol #6783 +* VTGate: Show columns from table_name targeted like select queries #6825 +* VTGate: This PR adds logic to simplify subquery expressions that are simple to +* VTGate: Adding MySQL Check Constraints #6865 +* VTGate: Manage read your own writes system settings #6871 +* VTGate: Allow table_schema comparisons #6887 +* VTGate: Additional options support for SELECT INTO and LOAD DATA #6872 +* VTGate: Fixes vtgate which throws an error in case of empty statements #6947 +* VTGate: [Forward Port] #6940 - Fix error handling in olap mode #6949 +* VTGate: Adds Planning and Parsing Support for Create View of MySQL 5.7 #7060 +* VTGate: fix error: cannot run Select on table "dual" #7118 +* VTGate: Allow system table to be set as default database #7150 +* VTGate: Move auto_increment from reserved to non reserved keyword #7162 +* VTGate: Add only expr of aliasedExpr to weightstring function #7165 +* VTGate: [9.0] don't try to compare varchars in vtgate #7271 +* VTGate: Load Data From S3 #6823 +* VTGate: Unnest simple subqueries #6831 +* VTGate: Adding MySQL Check Constraints #6869 * VTExplain: Add sequence table support for vtexplain #7186 * VSchema: Support back-quoted names #7073 * Healthcheck: healthy list should be recomputed when a tablet is removed #7176 +* Healthcheck: Hellcatlk wants to merge 1 commit into master from master #6953 ### Set Statement Support @@ -108,18 +117,19 @@ Set statement support has been added in Vitess. 
There are [some system variables * VReplication: MoveTables: delete routing rules and update vschema on Complete and Abort #7234 * VReplication: V2 Workflow Start: wait for streams to start and report errors if any while starting a workflow #7248 * VReplication: Ignore temp tables created by onlineddl #7159 -* VReplication V2 Workflows: rename Abort to Cancel #7276 -* VReplication DryRun: Report current dry run results for v2 commands #7255 -* VReplication: Miscellaneous improvements #7275 -* VReplication: Tablet throttle support "/throttle/check-self" available on all tablets #7319 -* VStreamer Events: remove preceding zeroes from decimals in Row Events #7297 -* Workflow Show: use timeUpdated to calculate vreplication lag #7342 -* vtctl: Add missing err checks for VReplication v2 #7361 +* VReplication: Set time zone to UTC while streaming rows #6845 +* VReplication: Materialization and character sets: Add test to verify/demo a workaround for charset issues while using string functions in filters #6847 +* VReplication: Tool to diagnose vreplication issues in production #6892 +* VReplication: Allow multiple blacklists for master #6816 * VStreamer Field Event: add allowed values for set/enum #6981 * VDiff: lock keyspace while snapshoting, restart target in case of errors #7012 +* VDiff: make enums comparable #6880 +* VDiff: add ability to limit number of rows to compare #6890 +* VDiff/Tablet Picker: fix issue where vdiff sleeps occasionally for tablet picker retry interval #6944 * [vtctld]: fix error state in Workflow Show #6970 * [vtctld] Workflow command: minor fixes #7008 -* [vtctl] Add missing err checks for VReplication v2 #7361 +* MoveTables: validate that source tables exist, move all tables #7018 +* SwitchWrites bug: reverse replication workflows can have wrong start positions #7169 ### VTTablet @@ -129,7 +139,6 @@ Set statement support has been added in Vitess. 
There are [some system variables * VTTablet: Adds better errors when there are timeouts in resource pools #7002 * VTTablet: Return to re-using server IDs for binlog connections #6941 * VTTablet: Correctly initialize the TabletType stats #6989 -* Backup: Use provided xtrabackup_root_path to find xbstream #7359 * Backup: Use pargzip instead of pgzip for compression. #7037 * Backup: Add s3 server-side encryption and decryption with customer provided key #7088 @@ -155,6 +164,12 @@ Automatically terminate migrations run by a failed tablet * Online DDL: Adding @@session_uuid to vtgate; used as 'context' #7263 * Online DDL: ignore errors if extracted gh-ost binary is identical to installed binary #6928 * Online DDL: Table lifecycle: skip time hint for unspecified states #7151 +* Online DDL: Migration uses low priority throttling #6830 +* Online DDL: Fix parsing of online-ddl command line options #6900 +* OnlineDDL bugfix: make sure schema is applied on tablet #6910 +* OnlineDDL: request_context/migration_context #7082 +* OnlineDDL: Fix missed rename in onlineddl_test #7148 +* OnlineDDL: Online DDL endtoend tests to support MacOS #7168 ### VTadmin @@ -163,21 +178,8 @@ Automatically terminate migrations run by a failed tablet * VTadmin: Add cluster protos to discovery and vtsql package constructors #7224 * VTadmin: Add static file service discovery implementation #7229 * VTadmin: Query vtadmin-api from vtadmin-web with fetch + react-query #7239 -* VTadmin: Add vtctld proxy to vtadmin API, add GetKeyspaces endpoint #7266 -* VTadmin: [vtctld] Expose vtctld gRPC port in local Docker example + update VTAdmin README #7306 -* VTadmin: Add CSS variables + fonts to VTAdmin #7309 -* VTadmin: Add React Router + a skeleton /debug page to VTAdmin #7310 -* VTadmin: Add NavRail component #7316 -* VTadmin: Add Button + Icon components #7350 +* VTadmin: Move allow_alias option in MySqlFlag enum to precede the aliased IDs #7166 * [vtctld]: vtctldclient generator #7238 -* [vtctld] Migrate 
cell getters #7302 -* [vtctld] Migrate tablet getters #7311 -* [vtctld] Migrate GetSchema #7346 -* [vtctld] vtctldclient command pkg #7321 -* [vtctld] Add GetSrvVSchema command #7334 -* [vtctld] Migrate ListBackups as GetBackups in new vtctld server #7352 - Merged -* [vtctld] Migrate GetVSchema to VtctldServer #7360 ### Other @@ -186,12 +188,13 @@ Automatically terminate migrations run by a failed tablet * Fix incorrect comments #7257 * Fix comment for IDPool #7212 * IsInternalOperationTableName: see if a table is used internally by vitess #7104 +* Add timeout for mysqld_shutdown #6849 +* Should receive healthcheck updates from all tablets in cells_to_watch #6852 +* Workflow listall with no workflows was missing newline #6853 +* Allow incomplete SNAPSHOT keyspaces #6863 ## Examples / Tutorials -* Update demo #7205 -* Delete select_commerce_data.sql #7245 -* Docker/vttestserver: Add MYSQL_BIND_HOST env #7293 * Examples/operator: fix tags and add vtorc example #7358 * local docker: copy examples/common into /vt/common to match MoveTables user guide #7252 * Update docker-compose examples to take advantage of improvements in Vitess #7009 @@ -201,7 +204,18 @@ Automatically terminate migrations run by a failed tablet * Vitess Slack Guidelines v1.0 #6961 * Do vschema_customer_sharded.json before create_customer_sharded.sql #7210 * Added readme for the demo example #7226 -* Pull Request template: link to contribution guide #7314 +* Adding @shlomi-noach to CODEOWNERS #6855 +* Add Rohit Nayak to maintainers #6903 +* 7.0.3 Release Notes #6902 +* 8_0_0 Release Notes #6958 +* Update maintainers of Vitess #7093 +* Updating Email Address #7095 +* Update morgo changes #7105 +* Move PR template to .github directory #7126 +* Fix trivial typo #7179 +* Add @ajm188 + @doeg to CODEOWNERS for vtctld service files #7202 +* Add @ajm188 + @doeg as vtadmin codeowners #7223 + ## Build Environment Changes @@ -223,16 +237,24 @@ Automatically terminate migrations run by a failed tablet * Add
unit test case to improve test coverage for go/sqltypes/result.go #7227 * Update Golang to 1.15 #7204 * Add linter configuration #7247 -* Tracking failed check runs #7026 -* Github Actions CI Builds: convert matrix strategy for unit and cluster tests to individual tests #7258 -* Add Update.AddWhere to mirror Select.AddWhere #7277 -* Descriptive names for CI checks #7289 -* Testing upgrade path from / downgrade path to v8.0.0 #7294 -* Add mysqlctl to docker images #7326 +* Modify targets to restore behavior of make install #6842 +* Download zookeeper 3.4.14 from archive site #6865 +* Bump junit from 4.12 to 4.13.1 in /java #6870 +* Fix ListBackups for gcp and az to work with root directory #6873 +* Pulling bootstrap resources from vitess-resources #6875 +* [Java] Bump SNAPSHOT version to 9.0 after Vitess release 8.0 #6907 +* Change dependencies for lite builds #6933 +* Truncate logged query in dbconn.go. #6959 +* [GO] go mod tidy #7137 +* goimport proto files correctly #7264 +* Cherry pick version of #7233 for release-9.0 #7265 +* Update Java version to 9.0 #7369 +* Adding curl as dependency #6965 ## Functionality Neutral Changes * Healthcheck: add unit test for multi-cell replica configurations #6978 +* Healthcheck: Correct Health Check for Non-Serving Types #6908 * Adds timeout to checking for tablets. #7106 * Remove deprecated vtctl commands, flags and vttablet rpcs #7115 * Fixes comment to mention the existence of reference tables. 
#7122 @@ -241,3 +263,26 @@ Automatically terminate migrations run by a failed tablet * action_repository: no need for http.Request #7124 * Testing version upgrade/downgrade path from/to 8.0 #7323 * Use `context` from Go's standard library #7235 +* Update `operator.yaml` backup engine description #6832 +* Docker - upgrade to Debian Buster #6833 +* Updating azblob to remove directory after removing backup #6836 +* Fixing some flaky tests #6874 +* Flaky test: attempt to fix TestConnection in go/test/endtoend/messaging #6879 +* Stabilize test #6882 +* Tablet streaming health fix: never silently skip health state changes #6885 +* Add owners to /go/mysql #6886 +* Fixes a bug in Load From statement #6911 +* Query consolidator: fix to ignore leading margin comments #6917 +* Updates to Contacts section as Reporting #7023 +* Create pull_request_template #7027 +* Fixed pull request template path #7062 + +## Backport +* Backport: [vtctld] Fix accidentally-broken legacy vtctl output format #7292 +* Backport #7276: Vreplication V2 Workflows: rename Abort to Cancel #7339 +* Backport #7297: VStreamer Events: remove preceding zeroes from decimals in Row Events +* Backport #7255: VReplication DryRun: Report current dry run results for v2 commands #7345 +* Backport #7275: VReplication: Miscellaneous improvements #7349 +* Backport 7342: Workflow Show: use timeUpdated to calculate vreplication lag #7354 +* Backport 7361: vtctl: Add missing err checks for VReplication v2 #7363 +* Backport 7297: VStreamer Events: remove preceding zeroes from decimals in Row Events #7340 diff --git a/docker/base/Dockerfile b/docker/base/Dockerfile index 2fba751592e..767f7519c7b 100644 --- a/docker/base/Dockerfile +++ b/docker/base/Dockerfile @@ -21,7 +21,7 @@ # TODO(mberlin): Remove the symlink and this note once # https://github.com/docker/hub-feedback/issues/292 is fixed. 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" diff --git a/docker/base/Dockerfile.mariadb b/docker/base/Dockerfile.mariadb index 41ebbaec688..8b7bb37c002 100644 --- a/docker/base/Dockerfile.mariadb +++ b/docker/base/Dockerfile.mariadb @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mariadb" FROM "${image}" diff --git a/docker/base/Dockerfile.mariadb103 b/docker/base/Dockerfile.mariadb103 index 83b708094e3..86b3391dc96 100644 --- a/docker/base/Dockerfile.mariadb103 +++ b/docker/base/Dockerfile.mariadb103 @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103" FROM "${image}" diff --git a/docker/base/Dockerfile.mysql56 b/docker/base/Dockerfile.mysql56 index 2f3f246d5d0..339204edab6 100644 --- a/docker/base/Dockerfile.mysql56 +++ b/docker/base/Dockerfile.mysql56 @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql56" FROM "${image}" diff --git a/docker/base/Dockerfile.mysql80 b/docker/base/Dockerfile.mysql80 index e159f834b33..00684f063eb 100644 --- a/docker/base/Dockerfile.mysql80 +++ b/docker/base/Dockerfile.mysql80 @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" diff --git a/docker/base/Dockerfile.percona b/docker/base/Dockerfile.percona index a660ee72a09..8dd42c97d9b 100644 --- a/docker/base/Dockerfile.percona +++ b/docker/base/Dockerfile.percona @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona" FROM "${image}" diff --git a/docker/base/Dockerfile.percona57 b/docker/base/Dockerfile.percona57 index 1fdc430cf5c..bb4b27e5d00 100644 --- a/docker/base/Dockerfile.percona57 +++ b/docker/base/Dockerfile.percona57 @@ -1,4 +1,4 @@ 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" diff --git a/docker/base/Dockerfile.percona80 b/docker/base/Dockerfile.percona80 index 12be156b8f2..509dd76e390 100644 --- a/docker/base/Dockerfile.percona80 +++ b/docker/base/Dockerfile.percona80 @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" diff --git a/docker/bootstrap/Dockerfile.common b/docker/bootstrap/Dockerfile.common index caf0c6c3374..ec8613cd68b 100644 --- a/docker/bootstrap/Dockerfile.common +++ b/docker/bootstrap/Dockerfile.common @@ -1,4 +1,4 @@ -FROM golang:1.15-buster +FROM golang:1.16-buster # Install Vitess build dependencies RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ diff --git a/docker/bootstrap/Dockerfile.mariadb b/docker/bootstrap/Dockerfile.mariadb index bc125dd82fb..bff31af344d 100644 --- a/docker/bootstrap/Dockerfile.mariadb +++ b/docker/bootstrap/Dockerfile.mariadb @@ -4,7 +4,7 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" # Install MariaDB 10 -RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done && \ +RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ add-apt-repository 'deb http://repo.percona.com/apt buster main' && \ { \ echo debconf debconf/frontend select Noninteractive; \ diff --git a/docker/bootstrap/Dockerfile.mysql56 b/docker/bootstrap/Dockerfile.mysql56 index 9905e5b0999..7cfb8e6876f 100644 --- a/docker/bootstrap/Dockerfile.mysql56 +++ b/docker/bootstrap/Dockerfile.mysql56 @@ -10,9 +10,9 @@ FROM "${image}" # # I think it's fine as MySQL 5.6 will be EOL pretty soon (February 5, 2021) # -RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver pool.sks-keyservers.net 
8C718D3B5072E1F5 && break; done && \ +RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ stretch mysql-5.6' && \ - for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done && \ + for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ diff --git a/docker/bootstrap/Dockerfile.mysql57 b/docker/bootstrap/Dockerfile.mysql57 index 3ebae48a4d7..58565b1a377 100644 --- a/docker/bootstrap/Dockerfile.mysql57 +++ b/docker/bootstrap/Dockerfile.mysql57 @@ -5,9 +5,9 @@ FROM "${image}" # Install MySQL 5.7 RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends gnupg dirmngr ca-certificates && \ - for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver ha.pool.sks-keyservers.net 8C718D3B5072E1F5 && break; done && \ + for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-5.7' && \ - for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done && \ + for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ diff --git a/docker/bootstrap/Dockerfile.mysql80 b/docker/bootstrap/Dockerfile.mysql80 index b2a60cdf5c0..ab1c9b97898 100644 --- a/docker/bootstrap/Dockerfile.mysql80 +++ 
b/docker/bootstrap/Dockerfile.mysql80 @@ -4,9 +4,9 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" # Install MySQL 8.0 -RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver ha.pool.sks-keyservers.net 8C718D3B5072E1F5 && break; done && \ +RUN for i in $(seq 1 10); do apt-key adv --no-tty --recv-keys --keyserver keyserver.ubuntu.com 8C718D3B5072E1F5 && break; done && \ add-apt-repository 'deb http://repo.mysql.com/apt/debian/ buster mysql-8.0' && \ - for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done && \ + for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ echo 'deb http://repo.percona.com/apt buster main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ diff --git a/docker/bootstrap/Dockerfile.percona b/docker/bootstrap/Dockerfile.percona index f91f1aaa3cd..c94605881c6 100644 --- a/docker/bootstrap/Dockerfile.percona +++ b/docker/bootstrap/Dockerfile.percona @@ -14,7 +14,7 @@ FROM "${image}" # 'buster' repository as the 'stretch' package requires 'libcurl3' that is not present # in Debian Buster. 
# -RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done && \ +RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ add-apt-repository 'deb http://repo.percona.com/apt stretch main' && \ add-apt-repository 'deb http://repo.percona.com/apt buster main' && \ { \ diff --git a/docker/bootstrap/Dockerfile.percona57 b/docker/bootstrap/Dockerfile.percona57 index f5ff6612fda..a7a90b1fe21 100644 --- a/docker/bootstrap/Dockerfile.percona57 +++ b/docker/bootstrap/Dockerfile.percona57 @@ -4,7 +4,7 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" # Install Percona 5.7 -RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done && \ +RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done && \ add-apt-repository 'deb http://repo.percona.com/apt buster main' && \ { \ echo debconf debconf/frontend select Noninteractive; \ diff --git a/docker/bootstrap/Dockerfile.percona80 b/docker/bootstrap/Dockerfile.percona80 index 2442f3649c9..59bd6134851 100644 --- a/docker/bootstrap/Dockerfile.percona80 +++ b/docker/bootstrap/Dockerfile.percona80 @@ -4,7 +4,7 @@ ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" # Install Percona 8.0 -RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keys.gnupg.net --recv-keys 9334A25F8507EFA5 && break; done \ +RUN for i in $(seq 1 10); do apt-key adv --no-tty --keyserver keyserver.ubuntu.com --recv-keys 9334A25F8507EFA5 && break; done \ && echo 'deb http://repo.percona.com/ps-80/apt buster main' > /etc/apt/sources.list.d/percona.list && \ { \ echo debconf debconf/frontend select Noninteractive; \ diff --git a/docker/bootstrap/README.md b/docker/bootstrap/README.md index 9014d8bbb86..3d19d0b750b 100644 --- 
a/docker/bootstrap/README.md +++ b/docker/bootstrap/README.md @@ -25,6 +25,24 @@ vitess$ docker/bootstrap/build.sh common vitess$ docker/bootstrap/build.sh mysql56 ``` +It is also possible to specify the resulting image name: + +```sh +vitess$ docker/bootstrap/build.sh common --image my-common-image +``` + +If custom image names are specified, you might need to set the base image name when building flavors: + +```sh +vitess$ docker/bootstrap/build.sh mysql56 --base_image my-common-image +``` + +Both arguments can be combined. For example: + +```sh +vitess$ docker/bootstrap/build.sh mysql56 --base_image my-common-image --image my-mysql-image +``` + ## For Vitess Project Maintainers To update all bootstrap images on Docker Hub, you can use the `docker_bootstrap` diff --git a/docker/bootstrap/build.sh b/docker/bootstrap/build.sh index 7d0d558c194..1b640aaa638 100755 --- a/docker/bootstrap/build.sh +++ b/docker/bootstrap/build.sh @@ -13,6 +13,21 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. +# +# Usage: +# +# First build the `common` image, then any flavors you want. For example: +# $ docker/bootstrap/build.sh common +# $ docker/bootstrap/build.sh mysql56 +# +# It is also possible to specify the resulting image name: +# $ docker/bootstrap/build.sh common --image my-common-image +# +# If custom image names are specified, you might need to set the base image name when building flavors: +# $ docker/bootstrap/build.sh mysql56 --base_image my-common-image +# Both arguments can be combined.
For example: +# $ docker/bootstrap/build.sh mysql56 --base_image my-common-image --image my-mysql-image + flavor=$1 if [[ -z "$flavor" ]]; then @@ -33,6 +48,19 @@ chmod -R o=g * arch=$(uname -m) [ "$arch" == "aarch64" ] && [ $flavor != "common" ] && arch_ext='-arm64v8' + + +base_image="${base_image:-vitess/bootstrap:$version-common}" +image="${image:-vitess/bootstrap:$version-$flavor$arch_ext}" + +while [ $# -gt 0 ]; do + if [[ $1 == *"--"* ]]; then + param="${1/--/}" + declare $param="$2" + fi + shift +done + if [ -f "docker/bootstrap/Dockerfile.$flavor$arch_ext" ]; then - docker build --no-cache -f docker/bootstrap/Dockerfile.$flavor$arch_ext -t vitess/bootstrap:$version-$flavor$arch_ext --build-arg bootstrap_version=$version . + docker build --no-cache -f docker/bootstrap/Dockerfile.$flavor$arch_ext -t $image --build-arg bootstrap_version=$version --build-arg image=$base_image . fi diff --git a/docker/lite/Dockerfile.alpine b/docker/lite/Dockerfile.alpine index 55f83f8564c..4a8f18789de 100644 --- a/docker/lite/Dockerfile.alpine +++ b/docker/lite/Dockerfile.alpine @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.mariadb b/docker/lite/Dockerfile.mariadb index ab637422088..6aab010dae3 100644 --- a/docker/lite/Dockerfile.mariadb +++ b/docker/lite/Dockerfile.mariadb @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mariadb" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.mariadb103 b/docker/lite/Dockerfile.mariadb103 index 2325a98ac34..36b2cbc5c13 100644 --- a/docker/lite/Dockerfile.mariadb103 +++ b/docker/lite/Dockerfile.mariadb103 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mariadb103" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.mysql56 b/docker/lite/Dockerfile.mysql56 index e98361338ae..bd1fe35820a 100644 --- a/docker/lite/Dockerfile.mysql56 +++ b/docker/lite/Dockerfile.mysql56 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql56" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.mysql57 b/docker/lite/Dockerfile.mysql57 index ffc498fc64f..568fc1d3a38 100644 --- a/docker/lite/Dockerfile.mysql57 +++ b/docker/lite/Dockerfile.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.mysql80 b/docker/lite/Dockerfile.mysql80 index 1de6cb2af93..e4de2f42d79 100644 --- a/docker/lite/Dockerfile.mysql80 +++ b/docker/lite/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.percona b/docker/lite/Dockerfile.percona index e1778f9184e..4c4cbd6e96d 100644 --- a/docker/lite/Dockerfile.percona +++ b/docker/lite/Dockerfile.percona @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.percona57 b/docker/lite/Dockerfile.percona57 index 476dfe27eb2..1f6b5da41e3 100644 --- a/docker/lite/Dockerfile.percona57 +++ b/docker/lite/Dockerfile.percona57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.percona80 b/docker/lite/Dockerfile.percona80 index f8d9bd00ca4..aef39f9b4f1 100644 --- a/docker/lite/Dockerfile.percona80 +++ b/docker/lite/Dockerfile.percona80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.testing b/docker/lite/Dockerfile.testing index 3d4d31342ff..0430e80b1df 100644 --- a/docker/lite/Dockerfile.testing +++ b/docker/lite/Dockerfile.testing @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder diff --git a/docker/lite/Dockerfile.ubi7.mysql57 b/docker/lite/Dockerfile.ubi7.mysql57 index a98bda48381..a8e4786867b 100644 --- a/docker/lite/Dockerfile.ubi7.mysql57 +++ b/docker/lite/Dockerfile.ubi7.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder @@ -38,8 +38,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ - || gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ + && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ diff --git a/docker/lite/Dockerfile.ubi7.mysql80 b/docker/lite/Dockerfile.ubi7.mysql80 index fce90776a2e..6388dd701bb 100644 --- a/docker/lite/Dockerfile.ubi7.mysql80 +++ 
b/docker/lite/Dockerfile.ubi7.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder @@ -38,8 +38,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 \ - || gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ + && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 A4A9406876FCBD3C456770C88C718D3B5072E1F5 ) \ && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ diff --git a/docker/lite/Dockerfile.ubi7.percona57 b/docker/lite/Dockerfile.ubi7.percona57 index a8911e23a9d..b32950cf35d 100644 --- a/docker/lite/Dockerfile.ubi7.percona57 +++ b/docker/lite/Dockerfile.ubi7.percona57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona57" FROM "${image}" AS builder @@ -38,8 +38,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 \ - || gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \ + && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \ && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ diff --git a/docker/lite/Dockerfile.ubi7.percona80 b/docker/lite/Dockerfile.ubi7.percona80 index e1ed4c89a1b..83ec99ae719 100644 --- a/docker/lite/Dockerfile.ubi7.percona80 +++ b/docker/lite/Dockerfile.ubi7.percona80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. 
-ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-percona80" FROM "${image}" AS builder @@ -38,8 +38,7 @@ FROM registry.access.redhat.com/ubi7/ubi:latest # Install keys and dependencies RUN mkdir /tmp/gpg && chmod 700 /tmp/gpg && export GNUPGHOME=/tmp/gpg \ && yum install -y --setopt=alwaysprompt=no gnupg \ - && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 \ - || gpg --keyserver ha.pool.sks-keyservers.net --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \ + && ( gpg --keyserver keyserver.ubuntu.com --recv-keys 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A 4D1BB29D63D98E422B2113B19334A25F8507EFA5 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 ) \ && gpg --export --armor 430BDF5C56E7C94E848EE60C1C4CBDCDCD2EFD2A > ${GNUPGHOME}/RPM-GPG-KEY-Percona.1 \ && gpg --export --armor 4D1BB29D63D98E422B2113B19334A25F8507EFA5 > ${GNUPGHOME}/RPM-GPG-KEY-Percona.2 \ && gpg --export --armor 6341AB2753D78A78A7C27BB124C6A8A7F4A80EB5 > ${GNUPGHOME}/RPM-GPG-KEY-CentOS-7 \ diff --git a/docker/lite/install_dependencies.sh b/docker/lite/install_dependencies.sh index 45a7b53dc4a..ac257d871fb 100755 --- a/docker/lite/install_dependencies.sh +++ b/docker/lite/install_dependencies.sh @@ -11,9 +11,7 @@ FLAVOR="$1" export DEBIAN_FRONTEND=noninteractive KEYSERVERS=( - ha.pool.sks-keyservers.net keyserver.ubuntu.com - hkp://p80.pool.sks-keyservers.net:80 hkp://keyserver.ubuntu.com:80 ) @@ -49,6 +47,7 @@ BASE_PACKAGES=( libatomic1 libcurl4 libdbd-mysql-perl + libwww-perl libev4 libjemalloc2 libtcmalloc-minimal4 @@ -92,9 +91,10 @@ mysql57) ) ;; mysql80) - mysql8_version=8.0.21 + mysql8_version=8.0.23 do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb 
/tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb + do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb do_fetch https://repo.mysql.com/apt/debian/pool/mysql-8.0/m/mysql-community/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb @@ -103,6 +103,7 @@ mysql80) PACKAGES=( /tmp/libmysqlclient21_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client-core_${mysql8_version}-1debian10_amd64.deb + /tmp/mysql-community-client-plugins_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-client_${mysql8_version}-1debian10_amd64.deb /tmp/mysql-community-server-core_${mysql8_version}-1debian10_amd64.deb @@ -230,6 +231,9 @@ esac # Install flavor-specific packages apt-get update for i in $(seq 1 $MAX_RETRY); do apt-get install -y --no-install-recommends "${PACKAGES[@]}" && break; done +if [[ "$i" = "$MAX_RETRY" ]]; then + exit 1 +fi # Clean up files we won't need in the final image. 
rm -rf /var/lib/apt/lists/* diff --git a/docker/local/Dockerfile b/docker/local/Dockerfile index 61117bd35ed..23f206e08b7 100644 --- a/docker/local/Dockerfile +++ b/docker/local/Dockerfile @@ -1,4 +1,4 @@ -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-common" FROM "${image}" diff --git a/docker/orchestrator/orchestrator.conf.json b/docker/orchestrator/orchestrator.conf.json index 73234cf20e8..729594044ed 100644 --- a/docker/orchestrator/orchestrator.conf.json +++ b/docker/orchestrator/orchestrator.conf.json @@ -5,7 +5,6 @@ "AuditToSyslog": false, "AuthenticationMethod": "", "AuthUserHeader": "", - "AutoPseudoGTID": false, "BackendDB": "sqlite", "BinlogEventsChunkSize": 10000, "CandidateInstanceExpireMinutes": 60, @@ -19,7 +18,6 @@ "DetectInstanceAliasQuery": "SELECT value FROM _vt.local_metadata WHERE name='Alias'", "DetectPromotionRuleQuery": "SELECT value FROM _vt.local_metadata WHERE name='PromotionRule'", "DetectDataCenterQuery": "SELECT value FROM _vt.local_metadata WHERE name='DataCenter'", - "DetectPseudoGTIDQuery": "", "DetectSemiSyncEnforcedQuery": "SELECT @@global.rpl_semi_sync_master_wait_no_slave AND @@global.rpl_semi_sync_master_timeout > 1000000", "DiscoverByShowSlaveHosts": false, "EnableSyslog": false, @@ -77,8 +75,6 @@ ], "PromotionIgnoreHostnameFilters": [ ], - "PseudoGTIDMonotonicHint": "asc:", - "PseudoGTIDPattern": "drop view if exists .*?`_pseudo_gtid_hint__", "ReadLongRunningQueries": false, "ReadOnly": false, "ReasonableMaintenanceReplicationLagSeconds": 20, @@ -97,7 +93,6 @@ "SkipBinlogEventsContaining": [ ], "SkipBinlogServerUnresolveCheck": true, - "SkipMaxScaleCheck": true, "SkipOrchestratorDatabaseUpdate": false, "SlaveStartPostWaitMilliseconds": 1000, "SnapshotTopologiesIntervalHours": 0, diff --git a/docker/vttestserver/Dockerfile.mysql57 b/docker/vttestserver/Dockerfile.mysql57 index 20b25c8ae09..106e77d9c1c 100644 --- a/docker/vttestserver/Dockerfile.mysql57 +++ 
b/docker/vttestserver/Dockerfile.mysql57 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql57" FROM "${image}" AS builder diff --git a/docker/vttestserver/Dockerfile.mysql80 b/docker/vttestserver/Dockerfile.mysql80 index 10c68e24e2b..6e00ed2a3bf 100644 --- a/docker/vttestserver/Dockerfile.mysql80 +++ b/docker/vttestserver/Dockerfile.mysql80 @@ -17,7 +17,7 @@ # ensure images contain the right binaries. # Use a temporary layer for the build stage. -ARG bootstrap_version=1 +ARG bootstrap_version=2 ARG image="vitess/bootstrap:${bootstrap_version}-mysql80" FROM "${image}" AS builder diff --git a/docker/vttestserver/run.sh b/docker/vttestserver/run.sh index 23de153459e..7df15ad1ddf 100755 --- a/docker/vttestserver/run.sh +++ b/docker/vttestserver/run.sh @@ -16,5 +16,25 @@ # Setup the Vschema Folder /vt/setup_vschema_folder.sh "$KEYSPACES" "$NUM_SHARDS" + +# Set the maximum connections in the cnf file +# use 1000 as the default if it is unspecified +if [[ -z $MYSQL_MAX_CONNECTIONS ]]; then + MYSQL_MAX_CONNECTIONS=1000 +fi +echo "max_connections = $MYSQL_MAX_CONNECTIONS" >> /vt/config/mycnf/default-fast.cnf + # Run the vttestserver binary -/vt/bin/vttestserver -port "$PORT" -keyspaces "$KEYSPACES" -num_shards "$NUM_SHARDS" -mysql_bind_host "${MYSQL_BIND_HOST:-127.0.0.1}" -mysql_server_version "${MYSQL_SERVER_VERSION:-$1}" -vschema_ddl_authorized_users=% -schema_dir="/vt/schema/" \ No newline at end of file +/vt/bin/vttestserver \ + -port "$PORT" \ + -keyspaces "$KEYSPACES" \ + -num_shards "$NUM_SHARDS" \ + -mysql_bind_host "${MYSQL_BIND_HOST:-127.0.0.1}" \ + -mysql_server_version "${MYSQL_SERVER_VERSION:-$1}" \ + -charset "${CHARSET:-utf8mb4}" \ + -foreign_key_mode "${FOREIGN_KEY_MODE:-allow}" \ + -enable_online_ddl="${ENABLE_ONLINE_DDL:-true}" \ + -enable_direct_ddl="${ENABLE_DIRECT_DDL:-true}" \ + 
-vschema_ddl_authorized_users=% \ + -schema_dir="/vt/schema/" + diff --git a/examples/are-you-alive/cmd/are-you-alive/main.go b/examples/are-you-alive/cmd/are-you-alive/main.go index 4dba7311e4f..393b27c1fba 100644 --- a/examples/are-you-alive/cmd/are-you-alive/main.go +++ b/examples/are-you-alive/cmd/are-you-alive/main.go @@ -50,8 +50,8 @@ func writeNextRecord(connectionString string) error { // Check to see if this is a duplicate key error. We've seen this // sometimes happen, and when it does this client app gets stuck in an // infinite loop of failure to write a duplicate key. It's possible - // that happens because a write is succesful but something goes wrong - // before the client recieves a response, so the client thinks the write + // that happens because a write is successful but something goes wrong + // before the client receives a response, so the client thinks the write // failed and does not increment the count. // // So when we specifically see a duplicate key error, assume that's what @@ -98,7 +98,7 @@ func readRandomRecord(connectionString string) error { // lag, so ignore the missing row if we are a replica. // TODO: Should we attempt to roughly figure out replication lag in // this client, at least to catch major failures? We could probably - // multiply delay by the difference betwen maxCount and the page we + // multiply delay by the difference between maxCount and the page we // are trying to read to figure out how long ago the row we were // trying to write was written. 
if client.ParseTabletType(connectionString) == "replica" || diff --git a/examples/compose/vtcompose/vtcompose.go b/examples/compose/vtcompose/vtcompose.go index 2b18317da6f..f12e41e5f6b 100644 --- a/examples/compose/vtcompose/vtcompose.go +++ b/examples/compose/vtcompose/vtcompose.go @@ -373,7 +373,7 @@ func addLookupDataToVschema( lookupTableOwner := "" // Find owner of lookup table - for primaryTableName, _ := range primaryTableColumns { + for primaryTableName := range primaryTableColumns { if strings.HasPrefix(tableName, primaryTableName) && len(primaryTableName) > len(lookupTableOwner) { lookupTableOwner = primaryTableName } diff --git a/examples/local/202_move_tables.sh b/examples/local/202_move_tables.sh index 8bf69cf95dd..4cb25390e07 100755 --- a/examples/local/202_move_tables.sh +++ b/examples/local/202_move_tables.sh @@ -19,4 +19,4 @@ source ./env.sh -vtctlclient MoveTables -workflow=commerce2customer commerce customer '{"customer":{}, "corder":{}}' +vtctlclient MoveTables -source commerce -tables 'customer,corder' Create customer.commerce2customer diff --git a/examples/local/203_switch_reads.sh b/examples/local/203_switch_reads.sh index 93b4a31b76c..04f73c17f2c 100755 --- a/examples/local/203_switch_reads.sh +++ b/examples/local/203_switch_reads.sh @@ -19,5 +19,4 @@ source ./env.sh -vtctlclient SwitchReads -tablet_type=rdonly customer.commerce2customer -vtctlclient SwitchReads -tablet_type=replica customer.commerce2customer +vtctlclient MoveTables -tablet_types=rdonly,replica SwitchTraffic customer.commerce2customer diff --git a/examples/local/204_switch_writes.sh b/examples/local/204_switch_writes.sh index b2a6434ad17..b2d85708a33 100755 --- a/examples/local/204_switch_writes.sh +++ b/examples/local/204_switch_writes.sh @@ -19,4 +19,4 @@ source ./env.sh -vtctlclient SwitchWrites customer.commerce2customer +vtctlclient MoveTables -tablet_types=master SwitchTraffic customer.commerce2customer diff --git a/examples/local/205_clean_commerce.sh 
b/examples/local/205_clean_commerce.sh index 203464aa5fa..37824052d27 100755 --- a/examples/local/205_clean_commerce.sh +++ b/examples/local/205_clean_commerce.sh @@ -19,5 +19,5 @@ source ./env.sh -vtctlclient DropSources customer.commerce2customer +vtctlclient MoveTables Complete customer.commerce2customer diff --git a/examples/local/303_reshard.sh b/examples/local/303_reshard.sh index 2e980a8fee7..0334b256bef 100755 --- a/examples/local/303_reshard.sh +++ b/examples/local/303_reshard.sh @@ -19,4 +19,4 @@ source ./env.sh -vtctlclient Reshard customer.cust2cust '0' '-80,80-' +vtctlclient Reshard -source_shards '0' -target_shards '-80,80-' Create customer.cust2cust diff --git a/examples/local/304_switch_reads.sh b/examples/local/304_switch_reads.sh index 041d4206048..29f143a878f 100755 --- a/examples/local/304_switch_reads.sh +++ b/examples/local/304_switch_reads.sh @@ -18,5 +18,4 @@ source ./env.sh -vtctlclient SwitchReads -tablet_type=rdonly customer.cust2cust -vtctlclient SwitchReads -tablet_type=replica customer.cust2cust +vtctlclient Reshard -tablet_types=rdonly,replica SwitchTraffic customer.cust2cust diff --git a/examples/local/305_switch_writes.sh b/examples/local/305_switch_writes.sh index 94a276e3867..2f9c5ad7aa7 100755 --- a/examples/local/305_switch_writes.sh +++ b/examples/local/305_switch_writes.sh @@ -18,4 +18,4 @@ source ./env.sh -vtctlclient SwitchWrites customer.cust2cust +vtctlclient Reshard -tablet_types=master SwitchTraffic customer.cust2cust diff --git a/examples/local/306_down_shard_0.sh b/examples/local/306_down_shard_0.sh index 9b6a0ed8180..0d956553cd5 100755 --- a/examples/local/306_down_shard_0.sh +++ b/examples/local/306_down_shard_0.sh @@ -17,6 +17,8 @@ source ./env.sh +vtctlclient Reshard Complete customer.cust2cust + for i in 200 201 202; do CELL=zone1 TABLET_UID=$i ./scripts/vttablet-down.sh CELL=zone1 TABLET_UID=$i ./scripts/mysqlctl-down.sh diff --git a/examples/local/scripts/vtadmin-up.sh b/examples/local/scripts/vtadmin-up.sh 
index de554b7f880..e76a9206189 100755 --- a/examples/local/scripts/vtadmin-up.sh +++ b/examples/local/scripts/vtadmin-up.sh @@ -8,9 +8,13 @@ vtadmin_api_port=14200 vtadmin \ --addr ":${vtadmin_api_port}" \ --http-origin "http://localhost:3000" \ + --http-tablet-url-tmpl "http://{{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \ + --tracer "opentracing-jaeger" \ + --grpc-tracing \ + --http-tracing \ --logtostderr \ --alsologtostderr \ - --cluster "id=local,name=local,discovery=staticfile,discovery-staticfile-path=./vtadmin/discovery.json" \ + --cluster "id=local,name=local,discovery=staticfile,discovery-staticfile-path=./vtadmin/discovery.json,tablet-fqdn-tmpl={{ .Tablet.Hostname }}:15{{ .Tablet.Alias.Uid }}" \ > "${log_dir}/vtadmin-api.out" 2>&1 & vtadmin_pid=$! @@ -23,11 +27,12 @@ function cleanup() { trap cleanup INT QUIT TERM -echo "vtadmin-api is up! Logs are in ${log_dir}/vtadmin-api.out, and its PID is ${vtadmin_pid}" +echo "vtadmin-api is running on http://localhost:${vtadmin_api_port}. Logs are in ${log_dir}/vtadmin-api.out, and its PID is ${vtadmin_pid}" ( cd ../../web/vtadmin && npm install && REACT_APP_VTADMIN_API_ADDRESS="http://127.0.0.1:${vtadmin_api_port}" \ + REACT_APP_ENABLE_EXPERIMENTAL_TABLET_DEBUG_VARS="true" \ npm run start ) diff --git a/examples/local/vstream_client.go b/examples/local/vstream_client.go new file mode 100644 index 00000000000..2ba25f85ebe --- /dev/null +++ b/examples/local/vstream_client.go @@ -0,0 +1,93 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package main + +import ( + "context" + "fmt" + "io" + "log" + "time" + vtgatepb "vitess.io/vitess/go/vt/proto/vtgate" + + binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" + _ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient" + _ "vitess.io/vitess/go/vt/vtgate/grpcvtgateconn" + "vitess.io/vitess/go/vt/vtgate/vtgateconn" +) + +/* + This is a sample client for streaming using the vstream API. It is setup to work with the local example and you can + either stream from the unsharded commerce keyspace or the customer keyspace after the sharding step. +*/ +func main() { + ctx := context.Background() + streamCustomer := true + var vgtid *binlogdatapb.VGtid + if streamCustomer { + vgtid = &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "customer", + Shard: "-80", + // Gtid "" is to stream from the start, "current" is to stream from the current gtid + // you can also specify a gtid to start with. 
+ Gtid: "", //"current" // "MySQL56/36a89abd-978f-11eb-b312-04ed332e05c2:1-265" + }, { + Keyspace: "customer", + Shard: "80-", + Gtid: "", + }}} + } else { + vgtid = &binlogdatapb.VGtid{ + ShardGtids: []*binlogdatapb.ShardGtid{{ + Keyspace: "commerce", + Shard: "0", + Gtid: "", + }}} + } + filter := &binlogdatapb.Filter{ + Rules: []*binlogdatapb.Rule{{ + Match: "customer", + Filter: "select * from customer", + }}, + } + conn, err := vtgateconn.Dial(ctx, "localhost:15991") + if err != nil { + log.Fatal(err) + } + defer conn.Close() + flags := &vtgatepb.VStreamFlags{ + //MinimizeSkew: false, + //HeartbeatInterval: 60, //seconds + } + reader, err := conn.VStream(ctx, topodatapb.TabletType_MASTER, vgtid, filter, flags) + for { + e, err := reader.Recv() + switch err { + case nil: + _ = e + fmt.Printf("%v\n", e) + case io.EOF: + fmt.Printf("stream ended\n") + return + default: + fmt.Printf("%s:: remote error: %v\n", time.Now(), err) + return + } + } +} diff --git a/examples/operator/101_initial_cluster.yaml b/examples/operator/101_initial_cluster.yaml index 93bae05a60b..4cb47c95342 100644 --- a/examples/operator/101_initial_cluster.yaml +++ b/examples/operator/101_initial_cluster.yaml @@ -8,12 +8,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v10.0.1 - vtgate: vitess/lite:v10.0.1 - vttablet: vitess/lite:v10.0.1 - vtbackup: vitess/lite:v10.0.1 + vtctld: vitess/lite:v11.0.3 + vtgate: vitess/lite:v11.0.3 + vttablet: vitess/lite:v11.0.3 + vtbackup: vitess/lite:v11.0.3 mysqld: - mysql56Compatible: vitess/lite:v10.0.1 + mysql56Compatible: vitess/lite:v11.0.3 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/201_customer_tablets.yaml b/examples/operator/201_customer_tablets.yaml index 0e9d8f0d893..d0567f74836 100644 --- a/examples/operator/201_customer_tablets.yaml +++ b/examples/operator/201_customer_tablets.yaml @@ -4,12 +4,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v10.0.1 - vtgate: 
vitess/lite:v10.0.1 - vttablet: vitess/lite:v10.0.1 - vtbackup: vitess/lite:v10.0.1 + vtctld: vitess/lite:v11.0.3 + vtgate: vitess/lite:v11.0.3 + vttablet: vitess/lite:v11.0.3 + vtbackup: vitess/lite:v11.0.3 mysqld: - mysql56Compatible: vitess/lite:v10.0.1 + mysql56Compatible: vitess/lite:v11.0.3 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/302_new_shards.yaml b/examples/operator/302_new_shards.yaml index c600aa2edc7..5c48f36c1ea 100644 --- a/examples/operator/302_new_shards.yaml +++ b/examples/operator/302_new_shards.yaml @@ -4,12 +4,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v10.0.1 - vtgate: vitess/lite:v10.0.1 - vttablet: vitess/lite:v10.0.1 - vtbackup: vitess/lite:v10.0.1 + vtctld: vitess/lite:v11.0.3 + vtgate: vitess/lite:v11.0.3 + vttablet: vitess/lite:v11.0.3 + vtbackup: vitess/lite:v11.0.3 mysqld: - mysql56Compatible: vitess/lite:v10.0.1 + mysql56Compatible: vitess/lite:v11.0.3 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/306_down_shard_0.yaml b/examples/operator/306_down_shard_0.yaml index c4d4b1c4dbe..a077e26521e 100644 --- a/examples/operator/306_down_shard_0.yaml +++ b/examples/operator/306_down_shard_0.yaml @@ -4,12 +4,12 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v10.0.1 - vtgate: vitess/lite:v10.0.1 - vttablet: vitess/lite:v10.0.1 - vtbackup: vitess/lite:v10.0.1 + vtctld: vitess/lite:v11.0.3 + vtgate: vitess/lite:v11.0.3 + vttablet: vitess/lite:v11.0.3 + vtbackup: vitess/lite:v11.0.3 mysqld: - mysql56Compatible: vitess/lite:v10.0.1 + mysql56Compatible: vitess/lite:v11.0.3 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/operator/operator.yaml b/examples/operator/operator.yaml index 9bbadc8221e..0549856ee01 100644 --- a/examples/operator/operator.yaml +++ b/examples/operator/operator.yaml @@ -5773,7 +5773,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME 
value: vitess-operator - image: planetscale/vitess-operator:v2.4.1 + image: planetscale/vitess-operator:latest imagePullPolicy: IfNotPresent name: vitess-operator resources: diff --git a/examples/operator/vtorc_example.yaml b/examples/operator/vtorc_example.yaml index ac62a7a296d..7e712c991c9 100644 --- a/examples/operator/vtorc_example.yaml +++ b/examples/operator/vtorc_example.yaml @@ -8,13 +8,13 @@ metadata: name: example spec: images: - vtctld: vitess/lite:v10.0.1 - vtorc: vitess/lite:v10.0.1 - vtgate: vitess/lite:v10.0.1 - vttablet: vitess/lite:v10.0.1 - vtbackup: vitess/lite:v10.0.1 + vtctld: vitess/lite:v11.0.3 + vtorc: vitess/lite:v11.0.3 + vtgate: vitess/lite:v11.0.3 + vttablet: vitess/lite:v11.0.3 + vtbackup: vitess/lite:v11.0.3 mysqld: - mysql56Compatible: vitess/lite:v10.0.1 + mysql56Compatible: vitess/lite:v11.0.3 mysqldExporter: prom/mysqld-exporter:v0.11.0 cells: - name: zone1 diff --git a/examples/region_sharding/203_reshard.sh b/examples/region_sharding/203_reshard.sh index b868885aa39..ce1a7f45587 100755 --- a/examples/region_sharding/203_reshard.sh +++ b/examples/region_sharding/203_reshard.sh @@ -16,4 +16,4 @@ source ./env.sh -vtctlclient Reshard -tablet_types=MASTER main.main2regions '0' '-40,40-80,80-c0,c0-' +vtctlclient Reshard -v1 -tablet_types=MASTER main.main2regions '0' '-40,40-80,80-c0,c0-' diff --git a/examples/region_sharding/main_vschema_sharded.json b/examples/region_sharding/main_vschema_sharded.json index 94770e57ea7..06d72e58981 100644 --- a/examples/region_sharding/main_vschema_sharded.json +++ b/examples/region_sharding/main_vschema_sharded.json @@ -4,7 +4,7 @@ "region_vdx": { "type": "region_json", "params": { - "region_map": "/home/user/my-vitess/examples/region_sharding/countries.json", + "region_map": "/vt/src/vitess.io/vitess/examples/region_sharding/countries.json", "region_bytes": "1" } } diff --git a/go.mod b/go.mod index fb655e991ed..1a8ee45a9f7 100644 --- a/go.mod +++ b/go.mod @@ -1,9 +1,9 @@ module vitess.io/vitess 
-go 1.15 +go 1.16 require ( - cloud.google.com/go/storage v1.0.0 + cloud.google.com/go/storage v1.10.0 github.com/AdaLogics/go-fuzz-headers v0.0.0-20210330150358-dbd898e17899 github.com/Azure/azure-pipeline-go v0.2.2 github.com/Azure/azure-storage-blob-go v0.10.0 @@ -13,30 +13,25 @@ require ( github.com/PuerkitoBio/goquery v1.5.1 github.com/aquarapid/vaultlib v0.5.1 github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 - github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 + github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 // indirect github.com/aws/aws-sdk-go v1.28.8 github.com/buger/jsonparser v0.0.0-20200322175846-f7e751efca13 github.com/cespare/xxhash/v2 v2.1.1 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd // indirect github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 // indirect - github.com/coreos/etcd v3.3.13+incompatible - github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f // indirect github.com/corpix/uarand v0.1.1 // indirect - github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 github.com/dave/jennifer v1.4.1 github.com/evanphx/json-patch v4.5.0+incompatible github.com/fsnotify/fsnotify v1.4.9 github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab github.com/go-sql-driver/mysql v1.5.1-0.20210202043019-fe2230a8b20c - github.com/gogo/protobuf v1.3.1 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b - github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 // indirect - github.com/golang/mock v1.3.1 - github.com/golang/protobuf v1.3.3 + github.com/golang/mock v1.5.0 + github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.1 - github.com/google/go-cmp v0.5.2 + github.com/google/go-cmp v0.5.5 github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 - github.com/google/uuid v1.1.1 + github.com/google/uuid v1.1.2 github.com/googleapis/gnostic v0.2.0 // indirect github.com/gorilla/handlers v1.5.1 github.com/gorilla/mux 
v1.8.0 @@ -45,7 +40,7 @@ require ( github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 github.com/hashicorp/consul/api v1.5.0 github.com/hashicorp/go-immutable-radix v1.1.0 // indirect - github.com/hashicorp/go-msgpack v0.5.5 + github.com/hashicorp/go-msgpack v0.5.5 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect github.com/hashicorp/go-uuid v1.0.2 // indirect github.com/hashicorp/serf v0.9.2 // indirect @@ -77,18 +72,20 @@ require ( github.com/philhofer/fwd v1.0.0 // indirect github.com/pires/go-proxyproto v0.0.0-20191211124218-517ecdf5bb2b github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.4.1 - github.com/prometheus/common v0.9.1 + github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a + github.com/planetscale/tengo v0.9.6-ps.v4 + github.com/planetscale/vtprotobuf v0.0.0-20210521163914-5a02622d1e2a + github.com/prometheus/client_golang v1.11.0 + github.com/prometheus/common v0.29.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e github.com/satori/go.uuid v1.2.0 // indirect github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1 - github.com/skeema/tengo v0.0.0-00010101000000-000000000000 github.com/soheilhy/cmux v0.1.4 github.com/spf13/cobra v1.1.1 github.com/spf13/pflag v1.0.5 github.com/spyzhov/ajson v0.4.2 - github.com/stretchr/testify v1.4.0 + github.com/stretchr/testify v1.7.0 github.com/tchap/go-patricia v0.0.0-20160729071656-dd168db6051b github.com/tebeka/selenium v0.9.9 github.com/tinylib/msgp v1.1.1 // indirect @@ -96,16 +93,24 @@ require ( github.com/uber/jaeger-client-go v2.16.0+incompatible github.com/uber/jaeger-lib v2.0.0+incompatible // indirect github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b + go.etcd.io/etcd/api/v3 v3.5.0 + go.etcd.io/etcd/client/pkg/v3 v3.5.0 + go.etcd.io/etcd/client/v3 v3.5.0 golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 - golang.org/x/lint 
v0.0.0-20190930215403-16217165b5de - golang.org/x/net v0.0.0-20201021035429-f5854403a974 - golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 - golang.org/x/sync v0.0.0-20201207232520-09787c993a3a - golang.org/x/text v0.3.3 - golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 - golang.org/x/tools v0.0.0-20201202200335-bef1c476418a - google.golang.org/api v0.13.0 - google.golang.org/grpc v1.29.1 + golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 + golang.org/x/net v0.0.0-20210614182718-04defd469f4e + golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c + golang.org/x/sync v0.0.0-20210220032951-036812b2e83c + golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c // indirect + golang.org/x/text v0.3.6 + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 + golang.org/x/tools v0.1.4 + google.golang.org/api v0.45.0 + google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 // indirect + google.golang.org/grpc v1.39.0 + google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 + google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b + google.golang.org/protobuf v1.27.1 gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d // indirect gopkg.in/gcfg.v1 v1.2.3 @@ -113,16 +118,14 @@ require ( gopkg.in/warnings.v0 v0.1.2 // indirect gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible - honnef.co/go/tools v0.0.1-2019.2.3 + honnef.co/go/tools v0.0.1-2020.1.4 k8s.io/apiextensions-apiserver v0.17.3 k8s.io/apimachinery v0.17.3 k8s.io/client-go v0.17.3 k8s.io/code-generator v0.17.3 - sigs.k8s.io/yaml v1.1.0 + sigs.k8s.io/yaml v1.2.0 ) replace github.com/skeema/tengo => github.com/planetscale/tengo v0.9.6-ps.v1 -// (NOTE:@ajm188) Something we depend on depends on moby/term, and that version -// of moby/term has this issue: https://github.com/moby/term/issues/15. 
-replace golang.org/x/sys => golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 +replace google.golang.org/grpc => google.golang.org/grpc v1.37.0 diff --git a/go.sum b/go.sum index 5815ed537bd..5956a7cd65e 100644 --- a/go.sum +++ b/go.sum @@ -1,22 +1,43 @@ -bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= cloud.google.com/go v0.41.0/go.mod h1:OauMR7DV8fzvZIl2qg6rkaIhD/vmgk4iwEw/h6ercmg= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= -cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= +cloud.google.com/go v0.74.0/go.mod 
h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= +cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= +cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= +cloud.google.com/go v0.81.0 h1:at8Tk2zUz63cLPR0JPWm5vp77pEZmzxEQBEfRKn1VV8= +cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= -cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= -cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= 
+cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210330150358-dbd898e17899 h1:Cm0cjER/2C+3BEuRBARZ+1HG+jwU5jbVkYysA7zE2H8= github.com/AdaLogics/go-fuzz-headers v0.0.0-20210330150358-dbd898e17899/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg= @@ -24,7 +45,6 @@ github.com/Azure/azure-pipeline-go v0.2.2 h1:6oiIS9yaG6XCCzhgAgKFfIWyo4LLCiDhZot github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-storage-blob-go v0.10.0 h1:evCwGreYo3XLeBV4vSxLbLiYb6e0SzsJiXQVRGsRXxs= github.com/Azure/azure-storage-blob-go v0.10.0/go.mod h1:ep1edmW+kNQx4UfWM9heESNmQdijykocJ0YOxmMX8SE= -github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.10.0 h1:mvdtztBqcL8se7MdrUweNieTNi4kfNG6GOJuurQJpuY= @@ -38,7 +58,6 @@ github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSW github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= 
-github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= @@ -52,17 +71,13 @@ github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e h1:4ZrkT/RzpnRO github.com/BurntSushi/xgbutil v0.0.0-20160919175755-f7c97cef3b4e/go.mod h1:uw9h2sd4WWHOPdJ13MQpwK5qYWKYDumDqxWWIknEQ+k= github.com/DataDog/datadog-go v2.2.0+incompatible h1:V5BKkxACZLjzHjSgBbr2gvLA2Ae49yhc6CSY7MLy5k4= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= +github.com/GeertJohan/go.incremental v1.0.0 h1:7AH+pY1XUgQE4Y1HcXYaMqAI0m9yrFqo/jt0CW30vsg= github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0= github.com/GeertJohan/go.rice v1.0.0 h1:KkI6O9uMaQU3VEKaj01ulavtF7o1fWT7+pk/4voiMLQ= github.com/GeertJohan/go.rice v1.0.0/go.mod h1:eH6gbSOAUv07dQuZVnBmoDP8mgsM1rtixis4Tib9if0= github.com/Masterminds/glide v0.13.2/go.mod h1:STyF5vcenH/rUqTEv+/hBXlSTo7KYwg2oc2f4tzPWic= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Masterminds/vcs v1.13.0/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA= -github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873 h1:93nQ7k53GjoMQ07HVP8g6Zj1fQZDDj7Xy2VkNNtvX8o= -github.com/Microsoft/go-winio v0.4.15-0.20200113171025-3fe6c5262873/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= -github.com/Microsoft/hcsshim v0.8.9 h1:VrfodqvztU8YSOvygU+DN1BGaSGxmrNfqOv5oOuX2Bk= -github.com/Microsoft/hcsshim v0.8.9/go.mod 
h1:5692vkUqntj1idxauYlpoINNKeqCiG6Sg38RRsjT5y8= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/PuerkitoBio/goquery v1.5.1 h1:PSPBGne8NIUWw+/7vFBV+kG2J/5MOjbzc7154OaKCSE= @@ -75,16 +90,17 @@ github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdko github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f h1:HR5nRmUQgXrwqZOwZ2DAc/aCi3Bu3xENpspW935vxu0= github.com/VividCortex/mysqlerr v0.0.0-20170204212430-6c6b55f8796f/go.mod h1:f3HiCrHjHBdcm6E83vGaXh1KomZMA2P6aeo3hKx/wg0= github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= +github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= github.com/andybalholm/cascadia v1.1.0 h1:BuuO6sSfQNFRu1LppgbD25Hr2vLYW25JvxHs5zzsLTo= github.com/andybalholm/cascadia 
v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/aquarapid/vaultlib v0.5.1 h1:vuLWR6bZzLHybjJBSUYPgZlIp6KZ+SXeHLRRYTuk6d4= github.com/aquarapid/vaultlib v0.5.1/go.mod h1:yT7AlEXtuabkxylOc/+Ulyp18tff1+QjgNLTnFWTlOs= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= @@ -117,32 +133,20 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= -github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd h1:qMd81Ts1T2OTKmB4acZcyKaMtRnY5Y44NuXGX2GFJ1w= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod 
h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/codegangsta/cli v1.20.0/go.mod h1:/qJNoX69yVSKu5o4jLyXAENLRyk1uhi7zkbQ3slBdOA= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= -github.com/containerd/cgroups v0.0.0-20190919134610-bf292b21730f/go.mod h1:OApqhQ4XNSNC13gXIwDjhOQxjWa/NxkwZXJ1EvqT0ko= -github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= -github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.3.4 h1:3o0smo5SKY7H6AJCmJhsnCjR2/V2T8VmiHt7seN2/kI= -github.com/containerd/containerd v1.3.4/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= -github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb h1:nXPkFq8X1a9ycY3GYQpFNxHh3j2JgY7zDZfq2EXMIzk= -github.com/containerd/continuity v0.0.0-20200413184840-d3ef23f19fbb/go.mod h1:Dq467ZllaHgAtVp4p1xUQWBrFXR9s/wyoTpG8zOJGkY= -github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= -github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0= -github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o= -github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod 
h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible h1:8F3hqu9fGYLBifCmRCJsicFqDx/D68Rt3q1JMazcgBQ= github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= @@ -150,22 +154,18 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f h1:JOrtw2xFKzlg+cbHpyrpLDmnN1HqhBfnX7WDiW7eG2c= -github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= +github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/corpix/uarand v0.1.1 
h1:RMr1TWc9F4n5jiPDzFHtmaUXLKLNUFK0SgCLo4BhX/U= github.com/corpix/uarand v0.1.1/go.mod h1:SFKZvkcRoLqVRFZ4u25xPmp6m9ktANfbpXZ7SJ0/FNU= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9 h1:uDmaGzcdjhF4i/plgjmEsriH11Y0o7RKapEf/LDaM3w= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432 h1:M5QgkYacWj0Xs8MhpIK/5uwU02icXpEoSo9sM2aRCps= -github.com/cyberdelia/go-metrics-graphite v0.0.0-20161219230853-39f87cc3b432/go.mod h1:xwIwAxMvYnVrGJPe2FKx5prTrnAjGOD8zvDOnxnrrkM= github.com/daaku/go.zipexe v1.0.0 h1:VSOgZtH418pH9L16hC/JrgSNJbbAL26pj7lmD1+CGdY= github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E= github.com/dave/jennifer v1.4.1 h1:XyqG6cn5RQsTj3qlWQTKlRGAyrTcsk1kUmWdZBzRjDw= @@ -177,15 +177,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= -github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/docker v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible h1:ZxJX4ZSNg1LORBsStUojbrLfkrE3Ut122XhzyZnN110= -github.com/docker/docker 
v17.12.0-ce-rc1.0.20200505174321-1655290016ac+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= -github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -193,8 +186,7 @@ github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25Kn github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= -github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= -github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M= @@ -207,18 +199,19 @@ github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSw 
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsouza/go-dockerclient v1.6.6 h1:9e3xkBrVkPb81gzYq23i7iDUEd6sx2ooeJA/gnYU6R4= -github.com/fsouza/go-dockerclient v1.6.6/go.mod h1:3/oRIWoe7uT6bwtAayj/EmJmepBjeL4pYvt7ZxC7Rnk= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab 
h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= @@ -269,28 +262,47 @@ github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-sql-driver/mysql v1.5.1-0.20210202043019-fe2230a8b20c h1:yUT3Ygm3yXBD2qLPxYRDBcnEz0MHgQ4TJ/87C/wKnWA= github.com/go-sql-driver/mysql v1.5.1-0.20210202043019-fe2230a8b20c/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= -github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= +github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= 
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/mock v1.5.0 h1:jlYHihg//f7RRwuPfptm04yp4s7O6Kw8EZiVYIGcH0g= +github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf 
v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.1/go.mod h1:DopwsBzvsk0Fs44TXzsVbJyPhcCPeIwnvohx4u74HPM= +github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -300,8 +312,14 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM= +github.com/google/go-cmp v0.4.1/go.mod 
h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-github/v27 v27.0.4/go.mod h1:/0Gr8pJ55COkmv+S/yPKCczSkUPIM/LnFyubufRNIS0= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= @@ -309,14 +327,27 @@ github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof 
v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4= github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= @@ -343,8 +374,8 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xC 
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.5.0 h1:Yo2bneoGy68A7aNwmuETFnPhjyBEm7n3vzRacEVMjvI= github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= @@ -399,6 +430,8 @@ github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c h1:aY2hhxLhjEAbfXOx2 github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c/go.mod h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428 h1:Mo9W14pwbO9VfRe+ygqZ8dFbPpoIK1HFrG/zjTuQ+nc= github.com/icrowley/fake v0.0.0-20180203215853-4178557ae428/go.mod h1:uhpZMVGznybq1itEKXj6RYw9I71qK4kH+OGMjRC4KEo= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -406,27 +439,32 @@ github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmoiron/sqlx v1.2.0 h1:41Ip0zITnmWNR/vHV+S4m+VoUivnWY5E4OJfLZjCJMA= github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= -github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ= +github.com/json-iterator/go v1.1.11/go.mod 
h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= @@ -434,8 +472,8 @@ github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/pgzip v1.2.4 h1:TQ7CNpYKovDOmqzRHKxJh0BeaBI7UdQZYc6p7pMQh1A= github.com/klauspost/pgzip v1.2.4/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= -github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= 
+github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= @@ -506,12 +544,6 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.2.3 h1:f/MjBEBDLttYCGfRaKBbKSRVF5aV2O6fnBpzknuE3jU= github.com/mitchellh/mapstructure v1.2.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/sys/mount v0.1.0 h1:Ytx78EatgFKtrqZ0BvJ0UtJE472ZvawVmil6pIfuCCU= -github.com/moby/sys/mount v0.1.0/go.mod h1:FVQFLDRWwyBjDTBNQXDlWnSFREqOo3OKX9aqhmeoo74= -github.com/moby/sys/mountinfo v0.1.0 h1:r8vMRbMAFEAfiNptYVokP+nfxPJzvRuia5e2vzXtENo= -github.com/moby/sys/mountinfo v0.1.0/go.mod h1:w2t2Avltqx8vE7gX5l+QiBKxODu2TX0+Syr3h52Tw4o= -github.com/moby/term v0.0.0-20200429084858-129dac9f73f6 h1:3Y9aosU6S5Bo8GYH0s+t1ej4m30GuUKvQ3c9ZLqdL28= -github.com/moby/term v0.0.0-20200429084858-129dac9f73f6/go.mod h1:or9wGItza1sRcM4Wd3dIv8DsFHYQuFsMHEdxUIlUxms= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -521,13 +553,13 @@ github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9 github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/montanaflynn/stats v0.6.3 h1:F8446DrvIF5V5smZfZ8K9nrmmix0AFgevPdLruGOmzk= 
github.com/montanaflynn/stats v0.6.3/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/ngdinhtoan/glide-cleanup v0.2.0/go.mod h1:UQzsmiDOb8YV3nOsCxK/c9zPpCZVNoHScRE3EO9pVMM= +github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229 h1:E2B8qYyeSgv5MXpmzZXRNp8IAQ4vjxIjhpAf5hv/tAg= github.com/nkovacs/streamquote v0.0.0-20170412213628-49af9bddb229/go.mod h1:0aYXnNPJ8l7uZxf45rWW1a/uME32OF0rhiYGNQ2oF2E= github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481 h1:Up6+btDp321ZG5/zdSLo48H9Iaq0UQGthrhWC6pCxzE= github.com/nozzle/throttler v0.0.0-20180817012639-2ea982251481/go.mod h1:yKZQO8QE2bHlgozqWDiRVqTFlLQSj30K/6SAK8EeYFw= @@ -544,15 +576,6 @@ github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGV github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1 h1:K0jcRCwNQM3vFGh1ppMtDh/+7ApJrjldlX8fA0jDTLQ= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/go-digest v1.0.0-rc1 h1:WzifXhOVOEOuFYOJAW6aQqW0TooG2iki3E3Ii+WN7gQ= 
-github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= -github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI= -github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runc v0.1.1 h1:GlxAyO6x8rfZYN9Tt0Kti5a/cP41iuiO2yYT0IJGY8Y= -github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= -github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02 h1:0R5mDLI66Qw13qN80TRz85zthQ2nf2+uDyiV23w6c3Q= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= @@ -573,12 +596,15 @@ github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG github.com/pires/go-proxyproto v0.0.0-20191211124218-517ecdf5bb2b h1:JPLdtNmpXbWytipbGwYz7zXZzlQNASEiFw5aGAM75us= github.com/pires/go-proxyproto v0.0.0-20191211124218-517ecdf5bb2b/go.mod h1:Odh9VFOZJCf9G8cLW5o435Xf1J95Jw9Gw5rnCjcwzAY= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/planetscale/tengo v0.9.6-ps.v1 h1:sVudRi2EKEJuPHchj8Ap6uFDGyybi0amP6OLc4Bao9s= -github.com/planetscale/tengo v0.9.6-ps.v1/go.mod 
h1:zrvIPs4+lw2VBJ2XX/tQj+gPbFheMht4AZFKGEhihYI= +github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a h1:y0OpQ4+5tKxeh9+H+2cVgASl9yMZYV9CILinKOiKafA= +github.com/planetscale/pargzip v0.0.0-20201116224723-90c7fc03ea8a/go.mod h1:GJFUzQuXIoB2Kjn1ZfDhJr/42D5nWOqRcIQVgCxTuIE= +github.com/planetscale/tengo v0.9.6-ps.v4 h1:nLGFobPtYEZDmuRww38RJiP7gVWVLBMzpTUysLIUF7Q= +github.com/planetscale/tengo v0.9.6-ps.v4/go.mod h1:Xwj7BHMQW30k479dZvWIl/WOCy33skXRjYZLsKgAINM= +github.com/planetscale/vtprotobuf v0.0.0-20210521163914-5a02622d1e2a h1:qr27Mt+/BoONcc6hogjN5PPCykCxXvJMpXCtYMMgkws= +github.com/planetscale/vtprotobuf v0.0.0-20210521163914-5a02622d1e2a/go.mod h1:8abDv+/TBY4d10QUVQgKYCy6+xk9SQPorWPZDqTkPHM= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -589,8 +615,9 @@ github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.4.1 h1:FFSuS004yOQEtDdTq+TAOLP5xUq63KqAFYyOi8zA+Y8= -github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -600,20 +627,23 @@ github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7q github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U= -github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE= +github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= 
+github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -627,11 +657,9 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUt github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= 
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1 h1:acClJNSOjUrAUKW+ZneCZymCFDWtSaJG5YQl8FoOlyI= github.com/sjmudd/stopwatch v0.0.0-20170613150411-f380bf8a9be1/go.mod h1:Pgf1sZ2KrHK8vdRTV5UHGp80LT7HMUKuNAiKC402abY= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= @@ -646,14 +674,12 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -668,8 +694,11 @@ github.com/stretchr/objx v0.2.0/go.mod 
h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoH github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tchap/go-patricia v0.0.0-20160729071656-dd168db6051b h1:i3lm+BZX5fAaH95wJavMgsSYU95LhSxdNCMa8nLv2gk= github.com/tchap/go-patricia v0.0.0-20160729071656-dd168db6051b/go.mod h1:bmLyhP68RS6kStMGxByiQ23RP/odRBOTVjwp2cDyi6I= @@ -679,7 +708,6 @@ github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhV github.com/tinylib/msgp v1.1.1 h1:TnCZ3FIuKeaIy+F45+Cnp+caqdXGy4z74HvwXN+570Y= github.com/tinylib/msgp v1.1.1/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 h1:LnC5Kc/wtumK+WB441p7ynQJzVuNRJiqddSIE3IlSEQ= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/uber-go/atomic v1.4.0 
h1:yOuPqEq4ovnhEjpHmfFwsqBXDYbQeT6Nb0bwD6XnD5o= @@ -689,35 +717,52 @@ github.com/uber/jaeger-client-go v2.16.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMW github.com/uber/jaeger-lib v2.0.0+incompatible h1:iMSCV0rmXEogjNWPh2D0xk9YVKvrtGoHJNe9ebLu/pw= github.com/uber/jaeger-lib v2.0.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasttemplate v1.0.1 h1:tY9CJiPnMXf1ERmG2EyK7gNUd+c6RKGD0IfU8WdUSz8= github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b 
h1:Itr7GbuXoM1PK/eCeNNia4Qd3ib9IgX9g9SpXgo8BwQ= github.com/z-division/go-zookeeper v0.0.0-20190128072838-6d7457066b9b/go.mod h1:JNALoWa+nCXR8SmgLluHcBNVJgyejzpKPZk9pX2yXXE= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0fZcnECiDrKJsfxka0= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= +go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= +go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= +go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= +go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= +go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= +go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= -go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 
h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= +go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4= +go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +go.uber.org/zap v1.17.0 h1:MTjgFu6ZLKvY6Pvaqk97GlxNBuMpV4Hy/3P6tRGlI2U= +go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -733,7 +778,6 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod 
h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200429183012-4b2356b1ed79/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -742,28 +786,41 @@ golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190312203227-4b39c73a6495/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136 h1:A1gGSx58LAGVHUUsOf7IiR0u8Xb6W51gRwfDBhkdcaw= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= -golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug= +golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= -golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod 
v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo= +golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -781,45 +838,151 @@ golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= -golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= +golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= 
+golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c h1:pkQiBZBvdos9qq4wBAHqlzuZHEXo07pqV06ef90u1WI= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync 
v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6 h1:DvY3Zkh7KabQE/kfzMvYvKirSiguP9Q/veMtkYyf0o8= -golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190124100055-b90733256f2e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190321052220-f7bb7a8bee54/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
+golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools 
v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= @@ -835,7 +998,6 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -843,9 +1005,42 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20201202200335-bef1c476418a h1:TYqOq/v+Ri5aADpldxXOj6PmvcPMOJbLjdALzZDQT2M= -golang.org/x/tools v0.0.0-20201202200335-bef1c476418a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod 
h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= +golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.1.4 h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs= +golang.org/x/tools v0.1.4/go.mod 
h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -858,13 +1053,33 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.13.0 h1:Q3Ui3V3/CVinFWFiW39Iw0kMuVrRzYX0wN6OPFp0lTA= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0/go.mod 
h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= +google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= +google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= +google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= +google.golang.org/api v0.45.0 h1:pqMffJFLBVUDIoYsHcqtxgQVTsmxMDpYLOc5MT4Jrww= +google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -874,20 +1089,66 @@ google.golang.org/genproto v0.0.0-20190626174449-989357319d63/go.mod h1:z3L6/3dT google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto 
v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a h1:Ob5/580gVHBJZgXnff1cZDbG+xLtMVE5mDRTe+nIsX4= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= -google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= -google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto 
v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= 
+google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= +google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= +google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= +google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9 h1:HBPuvo39L0DgfVn9eHR3ki/RjZoUFWa+em77e7KFDfs= +google.golang.org/genproto v0.0.0-20210701191553-46259e63a0a9/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= +google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= +google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= +google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b h1:D/GTYPo6I1oEo08Bfpuj3xl5XE+UGHj7//5fVyKxhsQ= +google.golang.org/grpc/examples v0.0.0-20210430044426-28078834f35b/go.mod 
h1:Ly7ZA/ARzg8fnPU9TyZIxoz33sEUuWX7txiqs8lPTgE= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= +google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0 h1:j9vAp9Re9bbtA/QFehkJpNba/6W2IbJtNuXZophCa54= gopkg.in/DataDog/dd-trace-go.v1 v1.17.0/go.mod h1:DVp8HmDh8PuTu2Z0fVVlBsyWaC++fzwVCaGWylTe3tg= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= -gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d 
h1:TxyelI5cVkbREznMhfzycHdkp5cLA7DpE+GKjSslYhM= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -901,7 +1162,6 @@ gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3 h1:m8OOJ4ccYHnx2f4gQwpno8nAX5OGOh7RLaaz0pj3Ogs= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -919,21 +1179,26 @@ gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRN gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= +gopkg.in/yaml.v3 
v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= -gotest.tools/v3 v3.0.2 h1:kG1BFyqVHuQoVQiR1bWGnfz/fmHvvuiSPIV7rvl360E= -gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= k8s.io/api v0.17.3 h1:XAm3PZp3wnEdzekNkcmj/9Y1zdmQYJ1I4GKSBBZ8aG0= k8s.io/api v0.17.3/go.mod h1:YZ0OTkuw7ipbe305fMpIdf3GLXZKRigjtZaV5gzC2J0= k8s.io/apiextensions-apiserver v0.17.3 h1:WDZWkPcbgvchEdDd7ysL21GGPx3UKZQLDZXEkevT6n4= @@ -963,7 +1228,10 @@ modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03 modernc.org/strutil v1.0.0/go.mod h1:lstksw84oURvj9y3tn8lGvRxyRC1S2+g5uuIzNfIOBs= modernc.org/xc v1.0.0/go.mod h1:mRNCo0bvLjGhHO9WsyuKVU4q0ceiDDDoEeWDJHrNx8I= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod 
h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18= -sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= +sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/go/bucketpool/bucketpool.go b/go/bucketpool/bucketpool.go index 7839142cb67..d51a3dedf0d 100644 --- a/go/bucketpool/bucketpool.go +++ b/go/bucketpool/bucketpool.go @@ -17,7 +17,7 @@ limitations under the License. package bucketpool import ( - "math" + "math/bits" "sync" ) @@ -69,12 +69,10 @@ func (p *Pool) findPool(size int) *sizedPool { if size > p.maxSize { return nil } - idx := int(math.Ceil(math.Log2(float64(size) / float64(p.minSize)))) - if idx < 0 { - idx = 0 - } - if idx > len(p.pools)-1 { - return nil + div, rem := bits.Div64(0, uint64(size), uint64(p.minSize)) + idx := bits.Len64(div) + if rem == 0 && div != 0 && (div&(div-1)) == 0 { + idx = idx - 1 } return p.pools[idx] } diff --git a/go/bytes2/buffer.go b/go/bytes2/buffer.go index a7dacf2d04a..1725274c43c 100644 --- a/go/bytes2/buffer.go +++ b/go/bytes2/buffer.go @@ -16,6 +16,8 @@ limitations under the License. package bytes2 +import "unsafe" + // Buffer implements a subset of the write portion of // bytes.Buffer, but more efficiently. This is meant to // be used in very high QPS operations, especially for @@ -59,6 +61,18 @@ func (buf *Buffer) String() string { return string(buf.bytes) } +// StringUnsafe is equivalent to String, but the copy of the string that it returns +// is _not_ allocated, so modifying this buffer after calling StringUnsafe will lead +// to undefined behavior. 
+func (buf *Buffer) StringUnsafe() string { + return *(*string)(unsafe.Pointer(&buf.bytes)) +} + +// Reset is equivalent to bytes.Buffer.Reset. +func (buf *Buffer) Reset() { + buf.bytes = buf.bytes[:0] +} + // Len is equivalent to bytes.Buffer.Len. func (buf *Buffer) Len() int { return len(buf.bytes) diff --git a/go/cmd/automation_client/automation_client.go b/go/cmd/automation_client/automation_client.go index f1316dabbff..ebc82a11048 100644 --- a/go/cmd/automation_client/automation_client.go +++ b/go/cmd/automation_client/automation_client.go @@ -23,9 +23,10 @@ import ( "strings" "time" + "google.golang.org/protobuf/encoding/prototext" + "context" - "github.com/golang/protobuf/proto" "google.golang.org/grpc" "vitess.io/vitess/go/vt/grpcclient" @@ -92,7 +93,8 @@ func main() { Name: *task, Parameters: params.parameters, } - fmt.Printf("Sending request:\n%v", proto.MarshalTextString(enqueueRequest)) + protoTextReq, _ := prototext.Marshal(enqueueRequest) + fmt.Printf("Sending request:\n%s", protoTextReq) enqueueResponse, err := client.EnqueueClusterOperation(context.Background(), enqueueRequest, grpc.WaitForReady(true)) if err != nil { fmt.Println("Failed to enqueue ClusterOperation. Error:", err) @@ -104,7 +106,8 @@ func main() { fmt.Println("ERROR:", errWait) os.Exit(5) } - fmt.Printf("SUCCESS: ClusterOperation finished.\n\nDetails:\n%v", proto.MarshalTextString(resp)) + protoTextResp, _ := prototext.Marshal(resp) + fmt.Printf("SUCCESS: ClusterOperation finished.\n\nDetails:\n%s", protoTextResp) } // waitForClusterOp polls and blocks until the ClusterOperation invocation specified by "id" has finished. If an error occurred, it will be returned. @@ -124,7 +127,8 @@ func waitForClusterOp(client automationservicepb.AutomationClient, id string) (* return resp, fmt.Errorf("ClusterOperation is in an unknown state. 
Details: %v", resp) case automationpb.ClusterOperationState_CLUSTER_OPERATION_DONE: if resp.ClusterOp.Error != "" { - return resp, fmt.Errorf("ClusterOperation failed. Details:\n%v", proto.MarshalTextString(resp)) + protoTextResp, _ := prototext.Marshal(resp) + return resp, fmt.Errorf("ClusterOperation failed. Details:\n%s", protoTextResp) } return resp, nil } diff --git a/go/cmd/query_analyzer/query_analyzer.go b/go/cmd/query_analyzer/query_analyzer.go index caeb1928270..b340084f7f5 100644 --- a/go/cmd/query_analyzer/query_analyzer.go +++ b/go/cmd/query_analyzer/query_analyzer.go @@ -120,7 +120,7 @@ func formatWithBind(buf *sqlparser.TrackedBuffer, node sqlparser.SQLNode) { } switch v.Type { case sqlparser.StrVal, sqlparser.HexVal, sqlparser.IntVal: - buf.WriteArg(fmt.Sprintf(":v%d", bindIndex)) + buf.WriteArg(":", fmt.Sprintf("v%d", bindIndex)) bindIndex++ default: node.Format(buf) diff --git a/go/cmd/vtadmin/main.go b/go/cmd/vtadmin/main.go index abc7d5b08c7..d1ee284b59f 100644 --- a/go/cmd/vtadmin/main.go +++ b/go/cmd/vtadmin/main.go @@ -17,17 +17,21 @@ limitations under the License. package main import ( + "context" "flag" + "io" "os" "time" "github.com/spf13/cobra" + "vitess.io/vitess/go/trace" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtadmin" "vitess.io/vitess/go/vt/vtadmin/cluster" "vitess.io/vitess/go/vt/vtadmin/grpcserver" vtadminhttp "vitess.io/vitess/go/vt/vtadmin/http" + "vitess.io/vitess/go/vt/vtadmin/http/debug" ) var ( @@ -37,6 +41,8 @@ var ( clusterFileConfig cluster.FileConfig defaultClusterConfig cluster.Config + traceCloser io.Closer = &noopCloser{} + rootCmd = &cobra.Command{ Use: "vtadmin", PreRun: func(cmd *cobra.Command, args []string) { @@ -44,32 +50,68 @@ var ( os.Args = os.Args[0:1] flag.Parse() os.Args = tmp - // (TODO:@amason) Check opts.EnableTracing and trace boot time. 
+ + if opts.EnableTracing || httpOpts.EnableTracing { + startTracing(cmd) + } }, Run: run, + PostRun: func(cmd *cobra.Command, args []string) { + trace.LogErrorsWhenClosing(traceCloser) + }, } ) +// fatal ensures the tracer is closed and final spans are sent before issuing +// a log.Fatal call with the given args. +func fatal(args ...interface{}) { + trace.LogErrorsWhenClosing(traceCloser) + log.Fatal(args...) +} + +// startTracing checks the value of --tracer and then starts tracing, populating +// the private global traceCloser +func startTracing(cmd *cobra.Command) { + tracer, err := cmd.Flags().GetString("tracer") + if err != nil { + log.Warningf("not starting tracer; err: %s", err) + return + } + + if tracer == "" || tracer == "noop" { + log.Warningf("starting tracing with noop tracer") + } + + traceCloser = trace.StartTracing("vtadmin") +} + func run(cmd *cobra.Command, args []string) { + bootSpan, _ := trace.NewSpan(context.Background(), "vtadmin.boot") + defer bootSpan.Finish() + configs := clusterFileConfig.Combine(defaultClusterConfig, clusterConfigs) clusters := make([]*cluster.Cluster, len(configs)) if len(configs) == 0 { - log.Fatal("must specify at least one cluster") + bootSpan.Finish() + fatal("must specify at least one cluster") } for i, cfg := range configs { cluster, err := cfg.Cluster() if err != nil { - log.Fatal(err) + bootSpan.Finish() + fatal(err) } clusters[i] = cluster } s := vtadmin.NewAPI(clusters, opts, httpOpts) + bootSpan.Finish() + if err := s.ListenAndServe(); err != nil { - log.Fatal(err) + fatal(err) } } @@ -81,10 +123,25 @@ func main() { rootCmd.Flags().Var(&clusterFileConfig, "cluster-config", "path to a yaml cluster configuration. see clusters.example.yaml") // (TODO:@amason) provide example config. 
rootCmd.Flags().Var(&defaultClusterConfig, "cluster-defaults", "default options for all clusters") + rootCmd.Flags().AddGoFlag(flag.Lookup("tracer")) // defined in go/vt/trace + rootCmd.Flags().AddGoFlag(flag.Lookup("tracing-enable-logging")) // defined in go/vt/trace + rootCmd.Flags().AddGoFlag(flag.Lookup("tracing-sampling-type")) // defined in go/vt/trace + rootCmd.Flags().AddGoFlag(flag.Lookup("tracing-sampling-rate")) // defined in go/vt/trace rootCmd.Flags().BoolVar(&opts.EnableTracing, "grpc-tracing", false, "whether to enable tracing on the gRPC server") rootCmd.Flags().BoolVar(&httpOpts.EnableTracing, "http-tracing", false, "whether to enable tracing on the HTTP server") + rootCmd.Flags().BoolVar(&httpOpts.DisableCompression, "http-no-compress", false, "whether to disable compression of HTTP API responses") + rootCmd.Flags().BoolVar(&httpOpts.DisableDebug, "http-no-debug", false, "whether to disable /debug/pprof/* and /debug/env HTTP endpoints") + rootCmd.Flags().Var(&debug.OmitEnv, "http-debug-omit-env", "name of an environment variable to omit from /debug/env, if http debug endpoints are enabled. specify multiple times to omit multiple env vars") + rootCmd.Flags().Var(&debug.SanitizeEnv, "http-debug-sanitize-env", "name of an environment variable to sanitize in /debug/env, if http debug endpoints are enabled. specify multiple times to sanitize multiple env vars") rootCmd.Flags().StringSliceVar(&httpOpts.CORSOrigins, "http-origin", []string{}, "repeated, comma-separated flag of allowed CORS origins. omit to disable CORS") + rootCmd.Flags().StringVar(&httpOpts.ExperimentalOptions.TabletURLTmpl, + "http-tablet-url-tmpl", + "https://{{ .Tablet.Hostname }}:80", + "[EXPERIMENTAL] Go template string to generate a reachable http(s) "+ + "address for a tablet. 
Currently used to make passthrough "+ + "requests to /debug/vars endpoints.", + ) // glog flags, no better way to do this rootCmd.Flags().AddGoFlag(flag.Lookup("v")) @@ -99,3 +156,7 @@ func main() { log.Flush() } + +type noopCloser struct{} + +func (nc *noopCloser) Close() error { return nil } diff --git a/go/cmd/vtbackup/vtbackup.go b/go/cmd/vtbackup/vtbackup.go index f5117376a23..cd8fbab570b 100644 --- a/go/cmd/vtbackup/vtbackup.go +++ b/go/cmd/vtbackup/vtbackup.go @@ -447,8 +447,8 @@ func startReplication(ctx context.Context, mysqld mysqlctl.MysqlDaemon, topoServ } // Stop replication (in case we're restarting), set master, and start replication. - if err := mysqld.SetMaster(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { - return vterrors.Wrap(err, "MysqlDaemon.SetMaster failed") + if err := mysqld.SetReplicationSource(ctx, ti.Tablet.MysqlHostname, int(ti.Tablet.MysqlPort), true /* stopReplicationBefore */, true /* startReplicationAfter */); err != nil { + return vterrors.Wrap(err, "MysqlDaemon.SetReplicationSource failed") } return nil } diff --git a/go/cmd/vtcombo/main.go b/go/cmd/vtcombo/main.go index 41c25d03470..f425ce40761 100644 --- a/go/cmd/vtcombo/main.go +++ b/go/cmd/vtcombo/main.go @@ -22,14 +22,13 @@ limitations under the License. 
package main import ( + "context" "flag" "os" "strings" "time" - "context" - - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/dbconfigs" @@ -45,6 +44,7 @@ import ( "vitess.io/vitess/go/vt/vtctld" "vitess.io/vitess/go/vt/vtgate" "vitess.io/vitess/go/vt/vttablet/tabletserver/tabletenv" + "vitess.io/vitess/go/vt/wrangler" topodatapb "vitess.io/vitess/go/vt/proto/topodata" vttestpb "vitess.io/vitess/go/vt/proto/vttest" @@ -115,7 +115,7 @@ func main() { // parse the input topology tpb := &vttestpb.VTTestTopology{} - if err := proto.UnmarshalText(*protoTopo, tpb); err != nil { + if err := prototext.Unmarshal([]byte(*protoTopo), tpb); err != nil { log.Errorf("cannot parse topology: %v", err) exit.Return(1) } @@ -158,7 +158,8 @@ func main() { // tablets configuration and init. // Send mycnf as nil because vtcombo won't do backups and restores. - if err := vtcombo.InitTabletMap(ts, tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, nil, *startMysql); err != nil { + uid, err := vtcombo.InitTabletMap(ts, tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, *startMysql) + if err != nil { log.Errorf("initTabletMapProto failed: %v", err) // ensure we start mysql in the event we fail here if *startMysql { @@ -167,6 +168,30 @@ func main() { exit.Return(1) } + globalCreateDb = func(ctx context.Context, ks *vttestpb.Keyspace) error { + wr := wrangler.New(logutil.NewConsoleLogger(), ts, nil) + newUID, err := vtcombo.CreateKs(ctx, ts, tpb, mysqld, &dbconfigs.GlobalDBConfigs, *schemaDir, ks, true, uid, wr) + if err != nil { + return err + } + uid = newUID + tpb.Keyspaces = append(tpb.Keyspaces, ks) + return nil + } + + globalDropDb = func(ctx context.Context, ksName string) error { + if err := vtcombo.DeleteKs(ctx, ts, ksName, mysqld, tpb); err != nil { + return err + } + + // Rebuild the SrvVSchema object + if err := ts.RebuildSrvVSchema(ctx, tpb.Cells); err != nil { + return err + } + + return 
nil + } + // Now that we have fully initialized the tablets, rebuild the keyspace graph. for _, ks := range tpb.Keyspaces { err := topotools.RebuildKeyspace(context.Background(), logutil.NewConsoleLogger(), ts, ks.GetName(), tpb.Cells, false) diff --git a/go/cmd/vtcombo/plugin_dbddl.go b/go/cmd/vtcombo/plugin_dbddl.go new file mode 100644 index 00000000000..49a7a601fb1 --- /dev/null +++ b/go/cmd/vtcombo/plugin_dbddl.go @@ -0,0 +1,54 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "context" + + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtgate/engine" + + vttestpb "vitess.io/vitess/go/vt/proto/vttest" +) + +var globalCreateDb func(ctx context.Context, ks *vttestpb.Keyspace) error +var globalDropDb func(ctx context.Context, ksName string) error + +// DBDDL doesn't need to store any state - we use the global variables above instead +type DBDDL struct{} + +// CreateDatabase implements the engine.DBDDLPlugin interface +func (plugin *DBDDL) CreateDatabase(ctx context.Context, name string) error { + ks := &vttestpb.Keyspace{ + Name: name, + Shards: []*vttestpb.Shard{{ + Name: "0", + }}, + } + return globalCreateDb(ctx, ks) +} + +// DropDatabase implements the engine.DBDDLPlugin interface +func (plugin *DBDDL) DropDatabase(ctx context.Context, name string) error { + return globalDropDb(ctx, name) +} + +func init() { + servenv.OnRun(func() { + engine.DBDDLRegister("vttest", &DBDDL{}) + }) +} diff --git a/go/cmd/vtcombo/plugin_grpcvtctldserver.go b/go/cmd/vtcombo/plugin_grpcvtctldserver.go new file mode 100644 index 00000000000..e5bba399072 --- /dev/null +++ b/go/cmd/vtcombo/plugin_grpcvtctldserver.go @@ -0,0 +1,30 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "vitess.io/vitess/go/vt/servenv" + "vitess.io/vitess/go/vt/vtctl/grpcvtctldserver" +) + +func init() { + servenv.OnRun(func() { + if servenv.GRPCCheckServiceMap("vtctld") { + grpcvtctldserver.StartServer(servenv.GRPCServer, ts) + } + }) +} diff --git a/go/cmd/vtctlclient/main.go b/go/cmd/vtctlclient/main.go index e7311a5cffa..bd8871851f7 100644 --- a/go/cmd/vtctlclient/main.go +++ b/go/cmd/vtctlclient/main.go @@ -67,6 +67,10 @@ func main() { logutil.LogEvent(logger, e) }) if err != nil { + if strings.Contains(err.Error(), "flag: help requested") { + return + } + errStr := strings.Replace(err.Error(), "remote error: ", "", -1) fmt.Printf("%s Error: %s\n", flag.Arg(0), errStr) log.Error(err) diff --git a/go/cmd/vtctldclient/cli/json.go b/go/cmd/vtctldclient/cli/json.go index 903ca905b3e..e3625cc78bd 100644 --- a/go/cmd/vtctldclient/cli/json.go +++ b/go/cmd/vtctldclient/cli/json.go @@ -17,12 +17,12 @@ limitations under the License. package cli import ( - "bytes" "encoding/json" "fmt" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + + "google.golang.org/protobuf/proto" ) // MarshalJSON marshals obj to a JSON string. 
It uses the jsonpb marshaler for @@ -37,19 +37,14 @@ import ( func MarshalJSON(obj interface{}) ([]byte, error) { switch obj := obj.(type) { case proto.Message: - b := bytes.NewBuffer(nil) - m := jsonpb.Marshaler{ - EnumsAsInts: false, - EmitDefaults: true, - Indent: " ", - OrigName: true, - } - - if err := m.Marshal(b, obj); err != nil { - return nil, fmt.Errorf("jsonpb.Marshal = %v", err) + m := protojson.MarshalOptions{ + Multiline: true, + Indent: " ", + UseEnumNumbers: true, + UseProtoNames: true, + EmitUnpopulated: true, } - - return b.Bytes(), nil + return m.Marshal(obj) default: data, err := json.MarshalIndent(obj, "", " ") if err != nil { diff --git a/go/cmd/vtctldclient/internal/command/backups.go b/go/cmd/vtctldclient/internal/command/backups.go index 21d5673ef34..02fca34b620 100644 --- a/go/cmd/vtctldclient/internal/command/backups.go +++ b/go/cmd/vtctldclient/internal/command/backups.go @@ -23,30 +23,50 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/topo/topoproto" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) // GetBackups makes a GetBackups gRPC call to a vtctld. 
var GetBackups = &cobra.Command{ - Use: "GetBackups keyspace shard", - Args: cobra.ExactArgs(2), + Use: "GetBackups ", + Args: cobra.ExactArgs(1), RunE: commandGetBackups, } +var getBackupsOptions = struct { + Limit uint32 + OutputJSON bool +}{} + func commandGetBackups(cmd *cobra.Command, args []string) error { - cli.FinishedParsing(cmd) + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } - keyspace := cmd.Flags().Arg(0) - shard := cmd.Flags().Arg(1) + cli.FinishedParsing(cmd) resp, err := client.GetBackups(commandCtx, &vtctldatapb.GetBackupsRequest{ Keyspace: keyspace, Shard: shard, + Limit: getBackupsOptions.Limit, }) if err != nil { return err } + if getBackupsOptions.OutputJSON { + data, err := cli.MarshalJSON(resp) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + return nil + } + names := make([]string, len(resp.Backups)) for i, b := range resp.Backups { names[i] = b.Name @@ -58,5 +78,7 @@ func commandGetBackups(cmd *cobra.Command, args []string) error { } func init() { + GetBackups.Flags().Uint32VarP(&getBackupsOptions.Limit, "limit", "l", 0, "Retrieve only the most recent N backups") + GetBackups.Flags().BoolVarP(&getBackupsOptions.OutputJSON, "json", "j", false, "Output backup info in JSON format rather than a list of backups") Root.AddCommand(GetBackups) } diff --git a/go/cmd/vtctldclient/internal/command/cells.go b/go/cmd/vtctldclient/internal/command/cells.go index e04984f761b..046dde13744 100644 --- a/go/cmd/vtctldclient/internal/command/cells.go +++ b/go/cmd/vtctldclient/internal/command/cells.go @@ -24,10 +24,55 @@ import ( "vitess.io/vitess/go/cmd/vtctldclient/cli" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) var ( + // AddCellInfo makes an AddCellInfo gRPC call to a vtctld. 
+ AddCellInfo = &cobra.Command{ + Use: "AddCellInfo --root [--server-address ] ", + Short: "Registers a local topology service in a new cell by creating the CellInfo.", + Long: `Registers a local topology service in a new cell by creating the CellInfo +with the provided parameters. + +The address will be used to connect to the topology service, and Vitess data will +be stored starting at the provided root.`, + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandAddCellInfo, + } + // AddCellsAlias makes an AddCellsAlias gRPC call to a vtctld. + AddCellsAlias = &cobra.Command{ + Use: "AddCellsAlias --cells [--cells ...] ", + Short: "Defines a group of cells that can be referenced by a single name (the alias).", + Long: `Defines a group of cells that can be referenced by a single name (the alias). + +When routing query traffic, replica/rdonly traffic can be routed across cells +within the group (alias). Only primary traffic can be routed across cells not in +the same group (alias).`, + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandAddCellsAlias, + } + // DeleteCellInfo makes a DeleteCellInfo gRPC call to a vtctld. + DeleteCellInfo = &cobra.Command{ + Use: "DeleteCellInfo [--force] ", + Short: "Deletes the CellInfo for the provided cell.", + Long: "Deletes the CellInfo for the provided cell. The cell cannot be referenced by any Shard record.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandDeleteCellInfo, + } + // DeleteCellsAlias makes a DeleteCellsAlias gRPC call to a vtctld. + DeleteCellsAlias = &cobra.Command{ + Use: "DeleteCellsAlias ", + Short: "Deletes the CellsAlias for the provided alias.", + Long: "Deletes the CellsAlias for the provided alias.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandDeleteCellsAlias, + } // GetCellInfoNames makes a GetCellInfoNames gRPC call to a vtctld. 
GetCellInfoNames = &cobra.Command{ Use: "GetCellInfoNames", @@ -46,8 +91,99 @@ var ( Args: cobra.NoArgs, RunE: commandGetCellsAliases, } + // UpdateCellInfo makes an UpdateCellInfo gRPC call to a vtctld. + UpdateCellInfo = &cobra.Command{ + Use: "UpdateCellInfo [--root ] [--server-address ] ", + Short: "Updates the content of a CellInfo with the provided parameters, creating the CellInfo if it does not exist.", + Long: `Updates the content of a CellInfo with the provided parameters, creating the CellInfo if it does not exist. + +If a value is empty, it is ignored.`, + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandUpdateCellInfo, + } + // UpdateCellsAlias makes an UpdateCellsAlias gRPC call to a vtctld. + UpdateCellsAlias = &cobra.Command{ + Use: "UpdateCellsAlias [--cells [--cells ...]] ", + Short: "Updates the content of a CellsAlias with the provided parameters, creating the CellsAlias if it does not exist.", + Long: "Updates the content of a CellsAlias with the provided parameters, creating the CellsAlias if it does not exist.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandUpdateCellsAlias, + } ) +var addCellInfoOptions topodatapb.CellInfo + +func commandAddCellInfo(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + cell := cmd.Flags().Arg(0) + _, err := client.AddCellInfo(commandCtx, &vtctldatapb.AddCellInfoRequest{ + Name: cell, + CellInfo: &addCellInfoOptions, + }) + if err != nil { + return err + } + + fmt.Printf("Created cell: %s\n", cell) + return nil +} + +var addCellsAliasOptions topodatapb.CellsAlias + +func commandAddCellsAlias(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + alias := cmd.Flags().Arg(0) + _, err := client.AddCellsAlias(commandCtx, &vtctldatapb.AddCellsAliasRequest{ + Name: alias, + Cells: addCellsAliasOptions.Cells, + }) + if err != nil { + return err + } + + fmt.Printf("Created cells alias: %s (cells = %v)\n", alias, 
addCellsAliasOptions.Cells) + return nil +} + +var deleteCellInfoOptions = struct { + Force bool +}{} + +func commandDeleteCellInfo(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + cell := cmd.Flags().Arg(0) + _, err := client.DeleteCellInfo(commandCtx, &vtctldatapb.DeleteCellInfoRequest{ + Name: cell, + Force: deleteCellInfoOptions.Force, + }) + if err != nil { + return err + } + + fmt.Printf("Deleted cell %s\n", cell) + return nil +} + +func commandDeleteCellsAlias(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + alias := cmd.Flags().Arg(0) + _, err := client.DeleteCellsAlias(commandCtx, &vtctldatapb.DeleteCellsAliasRequest{ + Name: alias, + }) + if err != nil { + return err + } + + fmt.Printf("Delete cells alias %s\n", alias) + return nil +} + func commandGetCellInfoNames(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) @@ -99,8 +235,73 @@ func commandGetCellsAliases(cmd *cobra.Command, args []string) error { return nil } +var updateCellInfoOptions topodatapb.CellInfo + +func commandUpdateCellInfo(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + cell := cmd.Flags().Arg(0) + resp, err := client.UpdateCellInfo(commandCtx, &vtctldatapb.UpdateCellInfoRequest{ + Name: cell, + CellInfo: &updateCellInfoOptions, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.CellInfo) + if err != nil { + return err + } + + fmt.Printf("Updated cell %s. 
New CellInfo:\n%s\n", resp.Name, data) + return nil +} + +var updateCellsAliasOptions topodatapb.CellsAlias + +func commandUpdateCellsAlias(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + alias := cmd.Flags().Arg(0) + resp, err := client.UpdateCellsAlias(commandCtx, &vtctldatapb.UpdateCellsAliasRequest{ + Name: alias, + CellsAlias: &updateCellsAliasOptions, + }) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.CellsAlias) + if err != nil { + return err + } + + fmt.Printf("Updated cells alias %s. New CellsAlias:\n%s\n", resp.Name, data) + return nil +} + func init() { + AddCellInfo.Flags().StringVarP(&addCellInfoOptions.ServerAddress, "server-address", "a", "", "The address the topology server will connect to for this cell.") + AddCellInfo.Flags().StringVarP(&addCellInfoOptions.Root, "root", "r", "", "The root path the topology server will use for this cell") + AddCellInfo.MarkFlagRequired("root") + Root.AddCommand(AddCellInfo) + + AddCellsAlias.Flags().StringSliceVarP(&addCellsAliasOptions.Cells, "cells", "c", nil, "The list of cell names that are members of this alias.") + Root.AddCommand(AddCellsAlias) + + DeleteCellInfo.Flags().BoolVarP(&deleteCellInfoOptions.Force, "force", "f", false, "Proceeds even if the cell's topology server cannot be reached. 
The assumption is that you shut down the entire cell, and just need to update the global topo data.") + Root.AddCommand(DeleteCellInfo) + Root.AddCommand(DeleteCellsAlias) + Root.AddCommand(GetCellInfoNames) Root.AddCommand(GetCellInfo) Root.AddCommand(GetCellsAliases) + + UpdateCellInfo.Flags().StringVarP(&updateCellInfoOptions.ServerAddress, "server-address", "a", "", "The address the topology server will connect to for this cell.") + UpdateCellInfo.Flags().StringVarP(&updateCellInfoOptions.Root, "root", "r", "", "The root path the topology server will use for this cell") + Root.AddCommand(UpdateCellInfo) + + UpdateCellsAlias.Flags().StringSliceVarP(&updateCellsAliasOptions.Cells, "cells", "c", nil, "The list of cell names that are members of this alias.") + Root.AddCommand(UpdateCellsAlias) } diff --git a/go/cmd/vtctldclient/internal/command/legacy_shim.go b/go/cmd/vtctldclient/internal/command/legacy_shim.go new file mode 100644 index 00000000000..f4036fd26ae --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/legacy_shim.go @@ -0,0 +1,104 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "context" + "flag" + "fmt" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/logutil" + "vitess.io/vitess/go/vt/vtctl/vtctlclient" + + logutilpb "vitess.io/vitess/go/vt/proto/logutil" +) + +var ( + // LegacyVtctlCommand provides a shim to make legacy ExecuteVtctlCommand + // RPCs. This allows users to use a single binary to make RPCs against both + // the new and old vtctld gRPC APIs. + LegacyVtctlCommand = &cobra.Command{ + Use: "LegacyVtctlCommand -- [flags ...] [args ...]", + Short: "Invoke a legacy vtctlclient command. Flag parsing is best effort.", + Args: cobra.ArbitraryArgs, + RunE: func(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + return runLegacyCommand(args) + }, + Long: strings.TrimSpace(` +LegacyVtctlCommand uses the legacy vtctl grpc client to make an ExecuteVtctlCommand +rpc to a vtctld. + +This command exists to support a smooth transition of any scripts that relied on +vtctlclient during the migration to the new vtctldclient, and will be removed, +following the Vitess project's standard deprecation cycle, once all commands +have been migrated to the new VtctldServer api. + +To see the list of available legacy commands, run "LegacyVtctlCommand -- help". +Note that, as with the old client, this requires a running server, as the flag +parsing and help/usage text generation, is done server-side. + +Also note that, in order to defer that flag parsing to the server side, you must +use the double-dash ("--") after the LegacyVtctlCommand subcommand string, or +the client-side flag parsing library we are using will attempt to parse those +flags (and fail). 
+`), + Example: strings.TrimSpace(` +LegacyVtctlCommand help # displays this help message +LegacyVtctlCommand -- help # displays help for supported legacy vtctl commands + +# When using legacy command that take arguments, a double dash must be used +# before the first flag argument, like in the first example. The double dash may +# be used, however, at any point after the "LegacyVtctlCommand" string, as in +# the second example. +LegacyVtctlCommand AddCellInfo -- -server_address "localhost:1234" -root "/vitess/cell1" +LegacyVtctlCommand -- AddCellInfo -server_address "localhost:5678" -root "/vitess/cell1"`), + } +) + +func runLegacyCommand(args []string) error { + // Duplicated (mostly) from go/cmd/vtctlclient/main.go. + logger := logutil.NewConsoleLogger() + + ctx, cancel := context.WithTimeout(context.Background(), actionTimeout) + defer cancel() + + err := vtctlclient.RunCommandAndWait(ctx, server, args, func(e *logutilpb.Event) { + logutil.LogEvent(logger, e) + }) + if err != nil { + if strings.Contains(err.Error(), "flag: help requested") { + // Help is caught by SetHelpFunc, so we don't want to indicate this as an error. + return nil + } + + errStr := strings.Replace(err.Error(), "remote error: ", "", -1) + fmt.Printf("%s Error: %s\n", flag.Arg(0), errStr) + log.Error(err) + } + + return err +} + +func init() { + Root.AddCommand(LegacyVtctlCommand) +} diff --git a/go/cmd/vtctldclient/internal/command/root.go b/go/cmd/vtctldclient/internal/command/root.go index 7243c8836e5..8335710f449 100644 --- a/go/cmd/vtctldclient/internal/command/root.go +++ b/go/cmd/vtctldclient/internal/command/root.go @@ -25,7 +25,6 @@ import ( "github.com/spf13/cobra" "vitess.io/vitess/go/trace" - "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/vtctl/vtctldclient" ) @@ -44,9 +43,7 @@ var ( // command context for every command. 
PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { traceCloser = trace.StartTracing("vtctldclient") - if server == "" { - err = errors.New("please specify -server to specify the vtctld server to connect to") - log.Error(err) + if err := ensureServerArg(); err != nil { return err } @@ -75,6 +72,17 @@ var ( } ) +var errNoServer = errors.New("please specify -server to specify the vtctld server to connect to") + +// ensureServerArg validates that --server was passed to the CLI. +func ensureServerArg() error { + if server == "" { + return errNoServer + } + + return nil +} + func init() { Root.PersistentFlags().StringVar(&server, "server", "", "server to use for connection") Root.PersistentFlags().DurationVar(&actionTimeout, "action_timeout", time.Hour, "timeout for the total command") diff --git a/go/cmd/vtctldclient/internal/command/routing_rules.go b/go/cmd/vtctldclient/internal/command/routing_rules.go new file mode 100644 index 00000000000..e43aa49c915 --- /dev/null +++ b/go/cmd/vtctldclient/internal/command/routing_rules.go @@ -0,0 +1,157 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package command + +import ( + "errors" + "fmt" + "io/ioutil" + "strings" + + "github.com/spf13/cobra" + + "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/json2" + + vschemapb "vitess.io/vitess/go/vt/proto/vschema" + vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" +) + +var ( + // ApplyRoutingRules makes an ApplyRoutingRules gRPC call to a vtctld. + ApplyRoutingRules = &cobra.Command{ + Use: "ApplyRoutingRules {--rules RULES | --rules-file RULES_FILE} [--cells=c1,c2,...] [--skip-rebuild] [--dry-run]", + Short: "Applies the VSchema routing rules.", + DisableFlagsInUseLine: true, + Args: cobra.NoArgs, + RunE: commandApplyRoutingRules, + } + // GetRoutingRules makes a GetRoutingRules gRPC call to a vtctld. + GetRoutingRules = &cobra.Command{ + Use: "GetRoutingRules", + Short: "Displays the VSchema routing rules.", + DisableFlagsInUseLine: true, + Args: cobra.NoArgs, + RunE: commandGetRoutingRules, + } +) + +var applyRoutingRulesOptions = struct { + Rules string + RulesFilePath string + Cells []string + SkipRebuild bool + DryRun bool +}{} + +func commandApplyRoutingRules(cmd *cobra.Command, args []string) error { + if applyRoutingRulesOptions.Rules != "" && applyRoutingRulesOptions.RulesFilePath != "" { + return fmt.Errorf("cannot pass both --rules (=%s) and --rules-file (=%s)", applyRoutingRulesOptions.Rules, applyRoutingRulesOptions.RulesFilePath) + } + + if applyRoutingRulesOptions.Rules == "" && applyRoutingRulesOptions.RulesFilePath == "" { + return errors.New("must pass exactly one of --rules and --rules-file") + } + + cli.FinishedParsing(cmd) + + var rulesBytes []byte + if applyRoutingRulesOptions.RulesFilePath != "" { + data, err := ioutil.ReadFile(applyRoutingRulesOptions.RulesFilePath) + if err != nil { + return err + } + + rulesBytes = data + } else { + rulesBytes = []byte(applyRoutingRulesOptions.Rules) + } + + rr := &vschemapb.RoutingRules{} + if err := json2.Unmarshal(rulesBytes, &rr); err != nil { + return err + } + + // 
Round-trip so when we display the result it's readable. + data, err := cli.MarshalJSON(rr) + if err != nil { + return err + } + + if applyRoutingRulesOptions.DryRun { + fmt.Printf("[DRY RUN] Would have saved new RoutingRules object:\n%s\n", data) + + if applyRoutingRulesOptions.SkipRebuild { + fmt.Println("[DRY RUN] Would not have rebuilt VSchema graph, would have required operator to run RebuildVSchemaGraph for changes to take effect") + } else { + fmt.Print("[DRY RUN] Would have rebuilt the VSchema graph") + if len(applyRoutingRulesOptions.Cells) == 0 { + fmt.Print(" in all cells\n") + } else { + fmt.Printf(" in the following cells: %s.\n", strings.Join(applyRoutingRulesOptions.Cells, ", ")) + } + } + + return nil + } + + _, err = client.ApplyRoutingRules(commandCtx, &vtctldatapb.ApplyRoutingRulesRequest{ + RoutingRules: rr, + SkipRebuild: applyRoutingRulesOptions.SkipRebuild, + RebuildCells: applyRoutingRulesOptions.Cells, + }) + if err != nil { + return err + } + + fmt.Printf("New RoutingRules object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", data) + + if applyRoutingRulesOptions.SkipRebuild { + fmt.Println("Skipping rebuild of VSchema graph, will need to run RebuildVSchemaGraph for changes to take effect.") + } + + return nil +} + +func commandGetRoutingRules(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + resp, err := client.GetRoutingRules(commandCtx, &vtctldatapb.GetRoutingRulesRequest{}) + if err != nil { + return err + } + + data, err := cli.MarshalJSON(resp.RoutingRules) + if err != nil { + return err + } + + fmt.Printf("%s\n", data) + + return nil +} + +func init() { + ApplyRoutingRules.Flags().StringVarP(&applyRoutingRulesOptions.Rules, "rules", "r", "", "Routing rules, specified as a string") + ApplyRoutingRules.Flags().StringVarP(&applyRoutingRulesOptions.RulesFilePath, "rules-file", "f", "", "Path to a file containing routing rules specified as JSON") + 
 ApplyRoutingRules.Flags().StringSliceVarP(&applyRoutingRulesOptions.Cells, "cells", "c", nil, "Limit the VSchema graph rebuilding to the specified cells. Ignored if --skip-rebuild is specified.") + ApplyRoutingRules.Flags().BoolVar(&applyRoutingRulesOptions.SkipRebuild, "skip-rebuild", false, "Skip rebuilding the SrvVSchema objects.") + ApplyRoutingRules.Flags().BoolVarP(&applyRoutingRulesOptions.DryRun, "dry-run", "d", false, "Load the specified routing rules as a validation step, but do not actually apply the rules to the topo.") + Root.AddCommand(ApplyRoutingRules) + + Root.AddCommand(GetRoutingRules) +} diff --git a/go/cmd/vtctldclient/internal/command/serving_graph.go b/go/cmd/vtctldclient/internal/command/serving_graph.go index 18a5d04ec37..2c7065088cd 100644 --- a/go/cmd/vtctldclient/internal/command/serving_graph.go +++ b/go/cmd/vtctldclient/internal/command/serving_graph.go @@ -29,15 +29,35 @@ import ( var ( // GetSrvKeyspaces makes a GetSrvKeyspaces gRPC call to a vtctld. GetSrvKeyspaces = &cobra.Command{ - Use: "GetSrvKeyspaces [ ...]", - Args: cobra.MinimumNArgs(1), - RunE: commandGetSrvKeyspaces, + Use: "GetSrvKeyspaces [ ...]", + Short: "Returns the SrvKeyspaces for the given keyspace in one or more cells.", + Args: cobra.MinimumNArgs(1), + RunE: commandGetSrvKeyspaces, + DisableFlagsInUseLine: true, } // GetSrvVSchema makes a GetSrvVSchema gRPC call to a vtctld. GetSrvVSchema = &cobra.Command{ - Use: "GetSrvVSchema cell", - Args: cobra.ExactArgs(1), - RunE: commandGetSrvVSchema, + Use: "GetSrvVSchema cell", + Short: "Returns the SrvVSchema for the given cell.", + Args: cobra.ExactArgs(1), + RunE: commandGetSrvVSchema, + DisableFlagsInUseLine: true, + } + // GetSrvVSchemas makes a GetSrvVSchemas gRPC call to a vtctld. 
+ GetSrvVSchemas = &cobra.Command{ + Use: "GetSrvVSchemas [ ...]", + Short: "Returns the SrvVSchema for all cells, optionally filtered by the given cells.", + Args: cobra.ArbitraryArgs, + RunE: commandGetSrvVSchemas, + DisableFlagsInUseLine: true, + } + // RebuildVSchemaGraph makes a RebuildVSchemaGraph gRPC call to a vtctld. + RebuildVSchemaGraph = &cobra.Command{ + Use: "RebuildVSchemaGraph [--cells=c1,c2,...]", + Short: "Rebuilds the cell-specific SrvVSchema from the global VSchema objects in the provided cells (or all cells if none provided).", + DisableFlagsInUseLine: true, + Args: cobra.NoArgs, + RunE: commandRebuildVSchemaGraph, } ) @@ -87,7 +107,57 @@ func commandGetSrvVSchema(cmd *cobra.Command, args []string) error { return nil } +func commandGetSrvVSchemas(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + cells := cmd.Flags().Args()[0:] + + resp, err := client.GetSrvVSchemas(commandCtx, &vtctldatapb.GetSrvVSchemasRequest{ + Cells: cells, + }) + if err != nil { + return err + } + + // By default, an empty array will serialize as `null`, but `[]` is a little nicer. 
+ data := []byte("[]") + + if len(resp.SrvVSchemas) > 0 { + data, err = cli.MarshalJSON(resp.SrvVSchemas) + if err != nil { + return err + } + } + + fmt.Printf("%s\n", data) + + return nil +} + +var rebuildVSchemaGraphOptions = struct { + Cells []string +}{} + +func commandRebuildVSchemaGraph(cmd *cobra.Command, args []string) error { + cli.FinishedParsing(cmd) + + _, err := client.RebuildVSchemaGraph(commandCtx, &vtctldatapb.RebuildVSchemaGraphRequest{ + Cells: rebuildVSchemaGraphOptions.Cells, + }) + if err != nil { + return err + } + + fmt.Println("RebuildVSchemaGraph: ok") + + return nil +} + func init() { Root.AddCommand(GetSrvKeyspaces) Root.AddCommand(GetSrvVSchema) + Root.AddCommand(GetSrvVSchemas) + + RebuildVSchemaGraph.Flags().StringSliceVarP(&rebuildVSchemaGraphOptions.Cells, "cells", "c", nil, "Specifies a comma-separated list of cells to look for tablets") + Root.AddCommand(RebuildVSchemaGraph) } diff --git a/go/cmd/vtctldclient/internal/command/tablets.go b/go/cmd/vtctldclient/internal/command/tablets.go index 65aef8298fd..a8ff785a677 100644 --- a/go/cmd/vtctldclient/internal/command/tablets.go +++ b/go/cmd/vtctldclient/internal/command/tablets.go @@ -32,27 +32,69 @@ import ( var ( // ChangeTabletType makes a ChangeTabletType gRPC call to a vtctld. ChangeTabletType = &cobra.Command{ - Use: "ChangeTabletType [--dry-run] TABLET_ALIAS TABLET_TYPE", - Args: cobra.ExactArgs(2), - RunE: commandChangeTabletType, + Use: "ChangeTabletType [--dry-run] ", + Short: "Changes the db type for the specified tablet, if possible.", + Long: `Changes the db type for the specified tablet, if possible. + +This command is used primarily to arrange replicas, and it will not convert a primary. +NOTE: This command automatically updates the serving graph.`, + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(2), + RunE: commandChangeTabletType, } // DeleteTablets makes a DeleteTablets gRPC call to a vtctld. 
DeleteTablets = &cobra.Command{ - Use: "DeleteTablets TABLET_ALIAS [ TABLET_ALIAS ... ]", - Args: cobra.MinimumNArgs(1), - RunE: commandDeleteTablets, + Use: "DeleteTablets [ ... ]", + Short: "Deletes tablet(s) from the topology.", + DisableFlagsInUseLine: true, + Args: cobra.MinimumNArgs(1), + RunE: commandDeleteTablets, } // GetTablet makes a GetTablet gRPC call to a vtctld. GetTablet = &cobra.Command{ - Use: "GetTablet alias", - Args: cobra.ExactArgs(1), - RunE: commandGetTablet, + Use: "GetTablet ", + Short: "Outputs a JSON structure that contains information about the tablet.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandGetTablet, } // GetTablets makes a GetTablets gRPC call to a vtctld. GetTablets = &cobra.Command{ - Use: "GetTablets [--strict] [{--cell $c1 [--cell $c2 ...], --keyspace $ks [--shard $shard], --tablet-alias $alias}]", - Args: cobra.NoArgs, - RunE: commandGetTablets, + Use: "GetTablets [--strict] [{--cell $c1 [--cell $c2 ...], --keyspace $ks [--shard $shard], --tablet-alias $alias}]", + Short: "Looks up tablets according to filter criteria.", + Long: `Looks up tablets according to the filter criteria. + +If --tablet-alias is passed, none of the other filters (keyspace, shard, cell) may +be passed, and tablets are looked up by tablet alias only. + +If --keyspace is passed, then all tablets in the keyspace are retrieved. The +--shard flag may also be passed to further narrow the set of tablets to that +. Passing --shard without also passing --keyspace will fail. + +Passing --cell limits the set of tablets to those in the specified cells. The +--cell flag accepts a CSV argument (e.g. --cell "c1,c2") and may be repeated +(e.g. --cell "c1" --cell "c2"). + +Valid output formats are "awk" and "json".`, + DisableFlagsInUseLine: true, + Args: cobra.NoArgs, + RunE: commandGetTablets, + } + // RefreshState makes a RefreshState gRPC call to a vtctld. 
+ RefreshState = &cobra.Command{ + Use: "RefreshState ", + Short: "Reloads the tablet record on the specified tablet.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandRefreshState, + } + // RefreshStateByShard makes a RefreshStateByShard gRPC call to a vtctld. + RefreshStateByShard = &cobra.Command{ + Use: "RefreshStateByShard [--cell ...] ", + Short: "Reloads the tablet record on all tablets in the shard, optionally limited to the specified cells.", + DisableFlagsInUseLine: true, + Args: cobra.ExactArgs(1), + RunE: commandRefreshStateByShard, } ) @@ -218,6 +260,60 @@ func commandGetTablets(cmd *cobra.Command, args []string) error { return nil } +func commandRefreshState(cmd *cobra.Command, args []string) error { + alias, err := topoproto.ParseTabletAlias(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + _, err = client.RefreshState(commandCtx, &vtctldatapb.RefreshStateRequest{ + TabletAlias: alias, + }) + if err != nil { + return err + } + + fmt.Printf("Refreshed state on %s\n", topoproto.TabletAliasString(alias)) + return nil +} + +var refreshStateByShardOptions = struct { + Cells []string +}{} + +func commandRefreshStateByShard(cmd *cobra.Command, args []string) error { + keyspace, shard, err := topoproto.ParseKeyspaceShard(cmd.Flags().Arg(0)) + if err != nil { + return err + } + + cli.FinishedParsing(cmd) + + resp, err := client.RefreshStateByShard(commandCtx, &vtctldatapb.RefreshStateByShardRequest{ + Keyspace: keyspace, + Shard: shard, + Cells: refreshStateByShardOptions.Cells, + }) + if err != nil { + return err + } + + msg := &strings.Builder{} + msg.WriteString(fmt.Sprintf("Refreshed state on %s/%s", keyspace, shard)) + if len(refreshStateByShardOptions.Cells) > 0 { + msg.WriteString(fmt.Sprintf(" in cells %s", strings.Join(refreshStateByShardOptions.Cells, ", "))) + } + msg.WriteByte('\n') + if resp.IsPartialRefresh { + msg.WriteString("State refresh was partial; some tablets in the shard may 
not have succeeded.\n") + } + + fmt.Print(msg.String()) + return nil +} + func init() { ChangeTabletType.Flags().BoolVarP(&changeTabletTypeOptions.DryRun, "dry-run", "d", false, "Shows the proposed change without actually executing it") Root.AddCommand(ChangeTabletType) @@ -234,4 +330,9 @@ func init() { GetTablets.Flags().StringVar(&getTabletsOptions.Format, "format", "awk", "Output format to use; valid choices are (json, awk)") GetTablets.Flags().BoolVar(&getTabletsOptions.Strict, "strict", false, "Require all cells to return successful tablet data. Without --strict, tablet listings may be partial.") Root.AddCommand(GetTablets) + + Root.AddCommand(RefreshState) + + RefreshStateByShard.Flags().StringSliceVarP(&refreshStateByShardOptions.Cells, "cells", "c", nil, "If specified, only call RefreshState on tablets in the specified cells. If empty, all cells are considered.") + Root.AddCommand(RefreshStateByShard) } diff --git a/go/cmd/vtctldclient/internal/command/vschemas.go b/go/cmd/vtctldclient/internal/command/vschemas.go index ac4f4499090..5a519b6d9a0 100644 --- a/go/cmd/vtctldclient/internal/command/vschemas.go +++ b/go/cmd/vtctldclient/internal/command/vschemas.go @@ -18,11 +18,14 @@ package command import ( "fmt" + "io/ioutil" "github.com/spf13/cobra" "vitess.io/vitess/go/cmd/vtctldclient/cli" + "vitess.io/vitess/go/json2" + vschemapb "vitess.io/vitess/go/vt/proto/vschema" vtctldatapb "vitess.io/vitess/go/vt/proto/vtctldata" ) @@ -33,8 +36,89 @@ var ( Args: cobra.ExactArgs(1), RunE: commandGetVSchema, } + // ApplyVSchema makes an ApplyVSchema gRPC call to a vtctld. + ApplyVSchema = &cobra.Command{ + Use: "ApplyVSchema {-vschema= || -vschema-file= || -sql= || -sql-file=} [-cells=c1,c2,...] [-skip-rebuild] [-dry-run] ", + Args: cobra.ExactArgs(1), + DisableFlagsInUseLine: true, + RunE: commandApplyVSchema, + Short: "Applies the VTGate routing schema to the provided keyspace. 
Shows the result after application.", + } ) +var applyVSchemaOptions = struct { + VSchema string + VSchemaFile string + SQL string + SQLFile string + DryRun bool + SkipRebuild bool + Cells []string +}{} + +func commandApplyVSchema(cmd *cobra.Command, args []string) error { + sqlMode := (applyVSchemaOptions.SQL != "") != (applyVSchemaOptions.SQLFile != "") + jsonMode := (applyVSchemaOptions.VSchema != "") != (applyVSchemaOptions.VSchemaFile != "") + + if sqlMode && jsonMode { + return fmt.Errorf("only one of the sql, sql-file, vschema, or vschema-file flags may be specified when calling the ApplyVSchema command") + } + + if !sqlMode && !jsonMode { + return fmt.Errorf("one of the sql, sql-file, vschema, or vschema-file flags must be specified when calling the ApplyVSchema command") + } + + req := &vtctldatapb.ApplyVSchemaRequest{ + Keyspace: cmd.Flags().Arg(0), + SkipRebuild: applyVSchemaOptions.SkipRebuild, + Cells: applyVSchemaOptions.Cells, + DryRun: applyVSchemaOptions.DryRun, + } + + var err error + if sqlMode { + if applyVSchemaOptions.SQLFile != "" { + sqlBytes, err := ioutil.ReadFile(applyVSchemaOptions.SQLFile) + if err != nil { + return err + } + req.Sql = string(sqlBytes) + } else { + req.Sql = applyVSchemaOptions.SQL + } + } else { // jsonMode + var schema []byte + if applyVSchemaOptions.VSchemaFile != "" { + schema, err = ioutil.ReadFile(applyVSchemaOptions.VSchemaFile) + if err != nil { + return err + } + } else { + schema = []byte(applyVSchemaOptions.VSchema) + } + + vs := &vschemapb.Keyspace{} // must be a non-nil message for json2.Unmarshal + err = json2.Unmarshal(schema, vs) + if err != nil { + return err + } + req.VSchema = vs + } + + cli.FinishedParsing(cmd) + + res, err := client.ApplyVSchema(commandCtx, req) + if err != nil { + return err + } + data, err := cli.MarshalJSON(res.VSchema) + if err != nil { + return err + } + fmt.Printf("New VSchema object:\n%s\nIf this is not what you expected, check the input data (as JSON parsing will skip unexpected fields).\n", data) + return nil +} + +func 
commandGetVSchema(cmd *cobra.Command, args []string) error { cli.FinishedParsing(cmd) @@ -58,5 +142,14 @@ func commandGetVSchema(cmd *cobra.Command, args []string) error { } func init() { + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.VSchema, "vschema", "", "VSchema") + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.VSchemaFile, "vschema-file", "", "VSchema File") + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.SQL, "sql", "", "A VSchema DDL SQL statement, e.g. `alter table t add vindex hash(id)`") + ApplyVSchema.Flags().StringVar(&applyVSchemaOptions.SQLFile, "sql-file", "", "A file containing VSchema DDL SQL") + ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.DryRun, "dry-run", false, "If set, do not save the altered vschema, simply echo to console.") + ApplyVSchema.Flags().BoolVar(&applyVSchemaOptions.SkipRebuild, "skip-rebuild", false, "If set, do no rebuild the SrvSchema objects.") + ApplyVSchema.Flags().StringSliceVar(&applyVSchemaOptions.Cells, "cells", nil, "If specified, limits the rebuild to the cells, after upload. Ignored if skipRebuild is set.") + Root.AddCommand(ApplyVSchema) + Root.AddCommand(GetVSchema) } diff --git a/go/cmd/vtctldclient/plugin_grpcvtctlclient.go b/go/cmd/vtctldclient/plugin_grpcvtctlclient.go new file mode 100644 index 00000000000..48c631a8baa --- /dev/null +++ b/go/cmd/vtctldclient/plugin_grpcvtctlclient.go @@ -0,0 +1,23 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +// Imports and registers the gRPC vtctl client. + +import ( + _ "vitess.io/vitess/go/vt/vtctl/grpcvtctlclient" +) diff --git a/go/cmd/vtgate/vtgate.go b/go/cmd/vtgate/vtgate.go index 732bcee80e6..3f98b7d6212 100644 --- a/go/cmd/vtgate/vtgate.go +++ b/go/cmd/vtgate/vtgate.go @@ -17,12 +17,14 @@ limitations under the License. package main import ( + "context" "flag" "math/rand" "strings" "time" - "context" + "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/vterrors" "vitess.io/vitess/go/exit" "vitess.io/vitess/go/vt/discovery" @@ -32,6 +34,7 @@ import ( "vitess.io/vitess/go/vt/topo" "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vtgate" + "vitess.io/vitess/go/vt/vttablet/tabletserver" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -49,6 +52,62 @@ func init() { servenv.RegisterDefaultFlags() } +// CheckCellFlags will check validation of cell and cells_to_watch flag +// it will help to avoid strange behaviors when vtgate runs but actually does not work +func CheckCellFlags(ctx context.Context, serv srvtopo.Server, cell string, cellsToWatch string) error { + // topo check + var topoServer *topo.Server + if serv != nil { + var err error + topoServer, err = serv.GetTopoServer() + if err != nil { + log.Exitf("Unable to create gateway: %v", err) + } + } else { + log.Exitf("topo server cannot be nil") + } + cellsInTopo, err := topoServer.GetKnownCells(ctx) + if err != nil { + return err + } + if len(cellsInTopo) == 0 { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "topo server should have at least one cell") + } + + // cell valid check + if cell == "" { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell flag must be set") + } + hasCell := false + for _, v := range cellsInTopo { + if v == cell { + hasCell = true + break + } + } + if !hasCell { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell:[%v] does not exist in topo", cell) + } + + // cells_to_watch valid check + cells := 
make([]string, 0, 1) + for _, c := range strings.Split(cellsToWatch, ",") { + if c == "" { + continue + } + // cell should contained in cellsInTopo + if exists := topo.InCellList(c, cellsInTopo); !exists { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cell: [%v] is not valid. Available cells: [%v]", c, strings.Join(cellsInTopo, ",")) + } + cells = append(cells, c) + } + if len(cells) == 0 { + return vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cells_to_watch flag cannot be empty") + } + + return nil +} + func main() { defer exit.Recover() @@ -68,8 +127,21 @@ func main() { log.Errorf("unknown tablet type: %v", ttStr) continue } - tabletTypes = append(tabletTypes, tt) + if tabletserver.IsServingType(tt) { + tabletTypes = append(tabletTypes, tt) + } } + } else { + log.Exitf("tablet_types_to_wait flag must be set") + } + + if len(tabletTypes) == 0 { + log.Exitf("tablet_types_to_wait should contain at least one serving tablet type") + } + + err := CheckCellFlags(context.Background(), resilientServer, *cell, *vtgate.CellsToWatch) + if err != nil { + log.Exitf("cells_to_watch validation failed: %v", err) } var vtg *vtgate.VTGate diff --git a/go/cmd/vtgateclienttest/services/callerid.go b/go/cmd/vtgateclienttest/services/callerid.go index 3bbd7941e30..54893f3bb07 100644 --- a/go/cmd/vtgateclienttest/services/callerid.go +++ b/go/cmd/vtgateclienttest/services/callerid.go @@ -23,7 +23,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/callerid" diff --git a/go/cmd/vtgateclienttest/services/echo.go b/go/cmd/vtgateclienttest/services/echo.go index 63102ce692e..3c79ddd4310 100644 --- a/go/cmd/vtgateclienttest/services/echo.go +++ b/go/cmd/vtgateclienttest/services/echo.go @@ -172,3 +172,15 @@ func (c *echoClient) VStream(ctx context.Context, tabletType topodatapb.TabletTy return c.fallbackClient.VStream(ctx, tabletType, vgtid, filter, flags, callback) } + +func (c 
*echoClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { + if strings.HasPrefix(sql, EchoPrefix) { + return session, echoQueryResult(map[string]interface{}{ + "callerId": callerid.EffectiveCallerIDFromContext(ctx), + "query": sql, + "bindVars": bindVariables, + "session": session, + }).Fields, nil + } + return c.fallbackClient.Prepare(ctx, session, sql, bindVariables) +} diff --git a/go/cmd/vtgateclienttest/services/errors.go b/go/cmd/vtgateclienttest/services/errors.go index 570b7f08190..9a4a5e39366 100644 --- a/go/cmd/vtgateclienttest/services/errors.go +++ b/go/cmd/vtgateclienttest/services/errors.go @@ -139,3 +139,17 @@ func (c *errorClient) StreamExecute(ctx context.Context, session *vtgatepb.Sessi } return c.fallbackClient.StreamExecute(ctx, session, sql, bindVariables, callback) } + +func (c *errorClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { + if err := requestToPartialError(sql, session); err != nil { + return session, nil, err + } + if err := requestToError(sql); err != nil { + return session, nil, err + } + return c.fallbackClient.Prepare(ctx, session, sql, bindVariables) +} + +func (c *errorClient) CloseSession(ctx context.Context, session *vtgatepb.Session) error { + return c.fallbackClient.CloseSession(ctx, session) +} diff --git a/go/cmd/vtgateclienttest/services/fallback.go b/go/cmd/vtgateclienttest/services/fallback.go index 5119fa36587..02f9239260b 100644 --- a/go/cmd/vtgateclienttest/services/fallback.go +++ b/go/cmd/vtgateclienttest/services/fallback.go @@ -52,6 +52,14 @@ func (c fallbackClient) StreamExecute(ctx context.Context, session *vtgatepb.Ses return c.fallback.StreamExecute(ctx, session, sql, bindVariables, callback) } +func (c fallbackClient) Prepare(ctx context.Context, session 
*vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { + return c.fallback.Prepare(ctx, session, sql, bindVariables) +} + +func (c fallbackClient) CloseSession(ctx context.Context, session *vtgatepb.Session) error { + return c.fallback.CloseSession(ctx, session) +} + func (c fallbackClient) ResolveTransaction(ctx context.Context, dtid string) error { return c.fallback.ResolveTransaction(ctx, dtid) } diff --git a/go/cmd/vtgateclienttest/services/terminal.go b/go/cmd/vtgateclienttest/services/terminal.go index 6a8e30fd9da..85fa664c2c2 100644 --- a/go/cmd/vtgateclienttest/services/terminal.go +++ b/go/cmd/vtgateclienttest/services/terminal.go @@ -62,6 +62,14 @@ func (c *terminalClient) StreamExecute(ctx context.Context, session *vtgatepb.Se return errTerminal } +func (c *terminalClient) Prepare(ctx context.Context, session *vtgatepb.Session, sql string, bindVariables map[string]*querypb.BindVariable) (*vtgatepb.Session, []*querypb.Field, error) { + return session, nil, errTerminal +} + +func (c *terminalClient) CloseSession(ctx context.Context, session *vtgatepb.Session) error { + return errTerminal +} + func (c *terminalClient) ResolveTransaction(ctx context.Context, dtid string) error { return errTerminal } diff --git a/go/cmd/vtorc/main.go b/go/cmd/vtorc/main.go index e64ced875d0..840296fe102 100644 --- a/go/cmd/vtorc/main.go +++ b/go/cmd/vtorc/main.go @@ -42,7 +42,6 @@ func main() { verbose := flag.Bool("verbose", false, "verbose") debug := flag.Bool("debug", false, "debug mode (very verbose)") stack := flag.Bool("stack", false, "add stack trace upon error") - config.RuntimeCLIFlags.SkipBinlogSearch = flag.Bool("skip-binlog-search", false, "when matching via Pseudo-GTID, only use relay logs. 
This can save the hassle of searching for a non-existend pseudo-GTID entry, for example in servers with replication filters.") config.RuntimeCLIFlags.SkipUnresolve = flag.Bool("skip-unresolve", false, "Do not unresolve a host name") config.RuntimeCLIFlags.SkipUnresolveCheck = flag.Bool("skip-unresolve-check", false, "Skip/ignore checking an unresolve mapping (via hostname_unresolve table) resolves back to same hostname") config.RuntimeCLIFlags.Noop = flag.Bool("noop", false, "Dry run; do not perform destructing operations") diff --git a/go/cmd/vttablet/vttablet.go b/go/cmd/vttablet/vttablet.go index 1d6838c3e9b..86780ec72fe 100644 --- a/go/cmd/vttablet/vttablet.go +++ b/go/cmd/vttablet/vttablet.go @@ -204,7 +204,7 @@ func createTabletServer(config *tabletenv.TabletConfig, ts *topo.Server, tabletA log.Exit("table acl config has to be specified with table-acl-config flag because enforce-tableacl-config is set.") } // creates and registers the query service - qsc := tabletserver.NewTabletServer("", config, ts, *tabletAlias) + qsc := tabletserver.NewTabletServer("", config, ts, tabletAlias) servenv.OnRun(func() { qsc.Register() addStatusParts(qsc) diff --git a/go/cmd/vttestserver/main.go b/go/cmd/vttestserver/main.go index 2b645ec0812..0e01fd2ba6b 100644 --- a/go/cmd/vttestserver/main.go +++ b/go/cmd/vttestserver/main.go @@ -28,7 +28,7 @@ import ( "strings" "syscall" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/prototext" "vitess.io/vitess/go/vt/log" vttestpb "vitess.io/vitess/go/vt/proto/vttest" @@ -147,6 +147,10 @@ func init() { flag.BoolVar(&config.InitWorkflowManager, "workflow_manager_init", false, "Enable workflow manager") flag.StringVar(&config.VSchemaDDLAuthorizedUsers, "vschema_ddl_authorized_users", "", "Comma separated list of users authorized to execute vschema ddl operations via vtgate") + + flag.StringVar(&config.ForeignKeyMode, "foreign_key_mode", "allow", "This is to provide how to handle foreign key constraint in 
create/alter table. Valid values are: allow, disallow") + flag.BoolVar(&config.EnableOnlineDDL, "enable_online_ddl", true, "Allow users to submit, review and control Online DDL") + flag.BoolVar(&config.EnableDirectDDL, "enable_direct_ddl", true, "Allow users to submit direct DDL statements") } func (t *topoFlags) buildTopology() (*vttestpb.VTTestTopology, error) { @@ -208,7 +212,7 @@ func parseFlags() (env vttest.Environment, err error) { } } else { var topology vttestpb.VTTestTopology - err = proto.UnmarshalText(protoTopo, &topology) + err = prototext.Unmarshal([]byte(protoTopo), &topology) if err != nil { return } diff --git a/go/cmd/vttestserver/vttestserver_test.go b/go/cmd/vttestserver/vttestserver_test.go index 1fe55919d7f..47a22ee7b38 100644 --- a/go/cmd/vttestserver/vttestserver_test.go +++ b/go/cmd/vttestserver/vttestserver_test.go @@ -28,6 +28,8 @@ import ( "testing" "time" + "google.golang.org/protobuf/encoding/protojson" + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/tlstest" @@ -38,8 +40,6 @@ import ( "vitess.io/vitess/go/vt/vttest" - "github.com/golang/protobuf/jsonpb" - "vitess.io/vitess/go/vt/proto/logutil" "vitess.io/vitess/go/vt/proto/vschema" "vitess.io/vitess/go/vt/vtctl/vtctlclient" @@ -126,6 +126,59 @@ func TestPersistentMode(t *testing.T) { assert.Equal(t, expectedRows, res.Rows) } +func TestForeignKeysAndDDLModes(t *testing.T) { + args := os.Args + conf := config + defer resetFlags(args, conf) + + cluster, err := startCluster("-foreign_key_mode=allow", "-enable_online_ddl=true", "-enable_direct_ddl=true") + assert.NoError(t, err) + defer cluster.TearDown() + + execOnCluster(cluster, "test_keyspace", func(conn *mysql.Conn) error { + _, err := conn.ExecuteFetch(`CREATE TABLE test_table_2 ( + id BIGINT, + test_table_id BIGINT, + FOREIGN KEY (test_table_id) REFERENCES test_table(id) + )`, 1, false) + assert.NoError(t, err) + _, err = conn.ExecuteFetch("SET @@ddl_strategy='online'", 1, false) + assert.NoError(t, err) + _, err = 
conn.ExecuteFetch("ALTER TABLE test_table ADD COLUMN something_else VARCHAR(255) NOT NULL DEFAULT ''", 1, false) + assert.NoError(t, err) + _, err = conn.ExecuteFetch("SET @@ddl_strategy='direct'", 1, false) + assert.NoError(t, err) + _, err = conn.ExecuteFetch("ALTER TABLE test_table ADD COLUMN something_else_2 VARCHAR(255) NOT NULL DEFAULT ''", 1, false) + assert.NoError(t, err) + _, err = conn.ExecuteFetch("SELECT something_else_2 FROM test_table", 1, false) + assert.NoError(t, err) + return nil + }) + + cluster.TearDown() + cluster, err = startCluster("-foreign_key_mode=disallow", "-enable_online_ddl=false", "-enable_direct_ddl=false") + assert.NoError(t, err) + defer cluster.TearDown() + + execOnCluster(cluster, "test_keyspace", func(conn *mysql.Conn) error { + _, err := conn.ExecuteFetch(`CREATE TABLE test_table_2 ( + id BIGINT, + test_table_id BIGINT, + FOREIGN KEY (test_table_id) REFERENCES test_table(id) + )`, 1, false) + assert.Error(t, err) + _, err = conn.ExecuteFetch("SET @@ddl_strategy='online'", 1, false) + assert.NoError(t, err) + _, err = conn.ExecuteFetch("ALTER TABLE test_table ADD COLUMN something_else VARCHAR(255) NOT NULL DEFAULT ''", 1, false) + assert.Error(t, err) + _, err = conn.ExecuteFetch("SET @@ddl_strategy='direct'", 1, false) + assert.NoError(t, err) + _, err = conn.ExecuteFetch("ALTER TABLE test_table ADD COLUMN something_else VARCHAR(255) NOT NULL DEFAULT ''", 1, false) + assert.Error(t, err) + return nil + }) +} + func TestCanVtGateExecute(t *testing.T) { args := os.Args conf := config @@ -305,7 +358,7 @@ func assertColumnVindex(t *testing.T, cluster vttest.LocalCluster, expected colu err := vtctlclient.RunCommandAndWait(ctx, server, args, func(e *logutil.Event) { var keyspace vschema.Keyspace - if err := jsonpb.UnmarshalString(e.Value, &keyspace); err != nil { + if err := protojson.Unmarshal([]byte(e.Value), &keyspace); err != nil { t.Error(err) } diff --git a/go/flagutil/optional.go b/go/flagutil/optional.go new file mode 100644 
index 00000000000..3bfcd3dd473 --- /dev/null +++ b/go/flagutil/optional.go @@ -0,0 +1,146 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "errors" + "flag" + "strconv" +) + +// OptionalFlag augments the flag.Value interface with a method to determine +// if a flag was set explicitly on the command-line. +// +// Though not part of the interface, because the return type would be different +// for each implementation, by convention, each implementation should define a +// Get() method to access the underlying value. +type OptionalFlag interface { + flag.Value + IsSet() bool +} + +var ( + _ OptionalFlag = (*OptionalFloat64)(nil) + _ OptionalFlag = (*OptionalString)(nil) +) + +// OptionalFloat64 implements OptionalFlag for float64 values. +type OptionalFloat64 struct { + val float64 + set bool +} + +// NewOptionalFloat64 returns an OptionalFloat64 with the specified value as its +// starting value. +func NewOptionalFloat64(val float64) *OptionalFloat64 { + return &OptionalFloat64{ + val: val, + set: false, + } +} + +// Set is part of the flag.Value interface. +func (f *OptionalFloat64) Set(arg string) error { + v, err := strconv.ParseFloat(arg, 64) + if err != nil { + return numError(err) + } + + f.val = v + f.set = true + + return nil +} + +// String is part of the flag.Value interface. 
+func (f *OptionalFloat64) String() string { + return strconv.FormatFloat(f.val, 'g', -1, 64) +} + +// Get returns the underlying float64 value of this flag. If the flag was not +// explicitly set, this will be the initial value passed to the constructor. +func (f *OptionalFloat64) Get() float64 { + return f.val +} + +// IsSet is part of the OptionalFlag interface. +func (f *OptionalFloat64) IsSet() bool { + return f.set +} + +// OptionalString implements OptionalFlag for string values. +type OptionalString struct { + val string + set bool +} + +// NewOptionalString returns an OptionalString with the specified value as its +// starting value. +func NewOptionalString(val string) *OptionalString { + return &OptionalString{ + val: val, + set: false, + } +} + +// Set is part of the flag.Value interface. +func (f *OptionalString) Set(arg string) error { + f.val = arg + f.set = true + return nil +} + +// String is part of the flag.Value interface. +func (f *OptionalString) String() string { + return f.val +} + +// Get returns the underlying string value of this flag. If the flag was not +// explicitly set, this will be the initial value passed to the constructor. +func (f *OptionalString) Get() string { + return f.val +} + +// IsSet is part of the OptionalFlag interface. +func (f *OptionalString) IsSet() bool { + return f.set +} + +// lifted directly from package flag to make the behavior of numeric parsing +// consistent with the standard library for our custom optional types. +var ( + errParse = errors.New("parse error") + errRange = errors.New("value out of range") +) + +// lifted directly from package flag to make the behavior of numeric parsing +// consistent with the standard library for our custom optional types. 
+func numError(err error) error { + ne, ok := err.(*strconv.NumError) + if !ok { + return err + } + + switch ne.Err { + case strconv.ErrSyntax: + return errParse + case strconv.ErrRange: + return errRange + default: + return err + } +} diff --git a/go/flagutil/sets.go b/go/flagutil/sets.go new file mode 100644 index 00000000000..cfe21481f42 --- /dev/null +++ b/go/flagutil/sets.go @@ -0,0 +1,80 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flagutil + +import ( + "flag" + "strings" + + "github.com/spf13/pflag" + "k8s.io/apimachinery/pkg/util/sets" +) + +var ( + _ flag.Value = (*StringSetFlag)(nil) + _ pflag.Value = (*StringSetFlag)(nil) +) + +// StringSetFlag can be used to collect multiple instances of a flag into a set +// of values. +// +// For example, defining the following: +// +// var x flagutil.StringSetFlag +// flag.Var(&x, "foo", "") +// +// And then specifying "-foo x -foo y -foo x", will result in a set of {x, y}. +// +// In addition to implemnting the standard flag.Value interface, it also +// provides an implementation of pflag.Value, so it is usable in libraries like +// cobra. +type StringSetFlag struct { + set sets.String +} + +// ToSet returns the underlying string set, or an empty set if the underlying +// set is nil. +func (set *StringSetFlag) ToSet() sets.String { + if set.set == nil { + set.set = sets.NewString() + } + + return set.set +} + +// Set is part of the pflag.Value and flag.Value interfaces. 
+func (set *StringSetFlag) Set(s string) error { + if set.set == nil { + set.set = sets.NewString(s) + return nil + } + + set.set.Insert(s) + return nil +} + +// String is part of the pflag.Value and flag.Value interfaces. +func (set *StringSetFlag) String() string { + if set.set == nil { + return "" + } + + return strings.Join(set.set.List(), ", ") +} + +// Type is part of the pflag.Value interface. +func (set *StringSetFlag) Type() string { return "StringSetFlag" } diff --git a/go/hack/detrand.go b/go/hack/detrand.go new file mode 100644 index 00000000000..1ea8a53ada7 --- /dev/null +++ b/go/hack/detrand.go @@ -0,0 +1,41 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package hack + +import ( + _ "unsafe" +) + +// DisableProtoBufRandomness disables the random insertion of whitespace characters when +// serializing Protocol Buffers in textual form (both when serializing to JSON or to ProtoText) +// +// Since the introduction of the APIv2 for Protocol Buffers, the default serializers in the +// package insert random whitespace characters that don't change the meaning of the serialized +// code but make byte-wise comparison impossible. The rationale behind this decision is as follows: +// +// "The ProtoBuf authors believe that golden tests are Wrong" +// +// Fine. Unfortunately, Vitess makes extensive use of golden tests through its test suite, which +// expect byte-wise comparison to be stable between test runs. 
Using the new version of the +// package would require us to rewrite hundreds of tests, or alternatively, we could disable +// the randomness and call it a day. The method required to disable the randomness is not public, but +// that won't stop us because we're good at computers. +// +// Tracking issue: https://github.com/golang/protobuf/issues/1121 +// +//go:linkname DisableProtoBufRandomness google.golang.org/protobuf/internal/detrand.Disable +func DisableProtoBufRandomness() diff --git a/go/json2/marshal.go b/go/json2/marshal.go index 763860b730e..b260d7934db 100644 --- a/go/json2/marshal.go +++ b/go/json2/marshal.go @@ -17,30 +17,25 @@ limitations under the License. package json2 import ( - "bytes" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" + "vitess.io/vitess/go/hack" ) +func init() { + hack.DisableProtoBufRandomness() +} + // MarshalPB marshals a proto. func MarshalPB(pb proto.Message) ([]byte, error) { - buf := new(bytes.Buffer) - m := jsonpb.Marshaler{} - if err := m.Marshal(buf, pb); err != nil { - return nil, err - } - return buf.Bytes(), nil + return protojson.Marshal(pb) } // MarshalIndentPB MarshalIndents a proto. 
func MarshalIndentPB(pb proto.Message, indent string) ([]byte, error) { - buf := new(bytes.Buffer) - m := jsonpb.Marshaler{ - Indent: indent, - } - if err := m.Marshal(buf, pb); err != nil { - return nil, err - } - return buf.Bytes(), nil + return protojson.MarshalOptions{ + Multiline: true, + Indent: indent, + }.Marshal(pb) } diff --git a/go/json2/marshal_test.go b/go/json2/marshal_test.go index cf6841472b3..96b7f508d73 100644 --- a/go/json2/marshal_test.go +++ b/go/json2/marshal_test.go @@ -33,24 +33,7 @@ func TestMarshalPB(t *testing.T) { t.Fatal(err) } want := "{\"name\":\"c1\",\"type\":\"VARCHAR\"}" - got := string(b) - if got != want { - t.Errorf("MarshalPB(col): %q, want %q", got, want) - } -} - -func TestMarshalIndentPB(t *testing.T) { - col := &vschemapb.Column{ - Name: "c1", - Type: querypb.Type_VARCHAR, - } - b, err := MarshalIndentPB(col, " ") - if err != nil { - t.Fatal(err) - } - want := "{\n \"name\": \"c1\",\n \"type\": \"VARCHAR\"\n}" - got := string(b) - if got != want { - t.Errorf("MarshalPB(col): %q, want %q", got, want) + if string(b) != want { + t.Errorf("MarshalPB(col): %q, want %q", b, want) } } diff --git a/go/json2/unmarshal.go b/go/json2/unmarshal.go index 10584cbaae0..ddc6035f610 100644 --- a/go/json2/unmarshal.go +++ b/go/json2/unmarshal.go @@ -22,8 +22,8 @@ import ( "encoding/json" "fmt" - "github.com/golang/protobuf/jsonpb" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/protojson" + "google.golang.org/protobuf/proto" ) var carriageReturn = []byte("\n") @@ -33,7 +33,7 @@ var carriageReturn = []byte("\n") // efficient and should not be used for high QPS operations. 
func Unmarshal(data []byte, v interface{}) error { if pb, ok := v.(proto.Message); ok { - return annotate(data, jsonpb.Unmarshal(bytes.NewBuffer(data), pb)) + return annotate(data, protojson.Unmarshal(data, pb)) } return annotate(data, json.Unmarshal(data, v)) } diff --git a/go/json2/unmarshal_test.go b/go/json2/unmarshal_test.go index 28a317631f0..9daed8d71cd 100644 --- a/go/json2/unmarshal_test.go +++ b/go/json2/unmarshal_test.go @@ -18,8 +18,6 @@ package json2 import ( "testing" - - vschemapb "vitess.io/vitess/go/vt/proto/vschema" ) func TestUnmarshal(t *testing.T) { @@ -50,40 +48,3 @@ func TestUnmarshal(t *testing.T) { } } } - -func TestUnmarshalPB(t *testing.T) { - tcases := []struct { - in, err string - }{{ - in: `{ - "name": "c1", - "type": "VARCHAR" -}`, - }, { - in: `{ - "name": "c1", - "type": "badtype" -}`, - err: "unknown value \"badtype\" for enum query.Type", - }, { - in: `{ - "l2": "val", - "l3": [ - "l4", - "l5"asdas" - ] -}`, - err: "line: 5, position 9: invalid character 'a' after array element", - }} - for _, tcase := range tcases { - var out vschemapb.Column - err := Unmarshal([]byte(tcase.in), &out) - got := "" - if err != nil { - got = err.Error() - } - if got != tcase.err { - t.Errorf("Unmarshal(%v) err: %v, want %v", tcase.in, got, tcase.err) - } - } -} diff --git a/go/mathstats/beta.go b/go/mathstats/beta.go new file mode 100644 index 00000000000..f70565a28bb --- /dev/null +++ b/go/mathstats/beta.go @@ -0,0 +1,87 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathstats + +import "math" + +func lgamma(x float64) float64 { + y, _ := math.Lgamma(x) + return y +} + +// mathBetaInc returns the value of the regularized incomplete beta +// function Iₓ(a, b). +// +// This is not to be confused with the "incomplete beta function", +// which can be computed as BetaInc(x, a, b)*Beta(a, b). 
+// +// If x < 0 or x > 1, returns NaN. +func mathBetaInc(x, a, b float64) float64 { + // Based on Numerical Recipes in C, section 6.4. This uses the + // continued fraction definition of I: + // + // (xᵃ*(1-x)ᵇ)/(a*B(a,b)) * (1/(1+(d₁/(1+(d₂/(1+...)))))) + // + // where B(a,b) is the beta function and + // + // d_{2m+1} = -(a+m)(a+b+m)x/((a+2m)(a+2m+1)) + // d_{2m} = m(b-m)x/((a+2m-1)(a+2m)) + if x < 0 || x > 1 { + return math.NaN() + } + bt := 0.0 + if 0 < x && x < 1 { + // Compute the coefficient before the continued + // fraction. + bt = math.Exp(lgamma(a+b) - lgamma(a) - lgamma(b) + + a*math.Log(x) + b*math.Log(1-x)) + } + if x < (a+1)/(a+b+2) { + // Compute continued fraction directly. + return bt * betacf(x, a, b) / a + } else { + // Compute continued fraction after symmetry transform. + return 1 - bt*betacf(1-x, b, a)/b + } +} + +// betacf is the continued fraction component of the regularized +// incomplete beta function Iₓ(a, b). +func betacf(x, a, b float64) float64 { + const maxIterations = 200 + const epsilon = 3e-14 + + raiseZero := func(z float64) float64 { + if math.Abs(z) < math.SmallestNonzeroFloat64 { + return math.SmallestNonzeroFloat64 + } + return z + } + + c := 1.0 + d := 1 / raiseZero(1-(a+b)*x/(a+1)) + h := d + for m := 1; m <= maxIterations; m++ { + mf := float64(m) + + // Even step of the recurrence. + numer := mf * (b - mf) * x / ((a + 2*mf - 1) * (a + 2*mf)) + d = 1 / raiseZero(1+numer*d) + c = raiseZero(1 + numer/c) + h *= d * c + + // Odd step of the recurrence. 
// Sample is a collection of data points.
type Sample struct {
	// Xs is the slice of sample values.
	Xs []float64

	// Sorted indicates that Xs is sorted in ascending order.
	Sorted bool
}

// Bounds returns the minimum and maximum values of xs.
func Bounds(xs []float64) (min float64, max float64) {
	if len(xs) == 0 {
		return math.NaN(), math.NaN()
	}
	min, max = xs[0], xs[0]
	for _, x := range xs {
		if x < min {
			min = x
		}
		if x > max {
			max = x
		}
	}
	return
}

// Bounds returns the minimum and maximum values of the Sample.
//
// This is constant time if s.Sorted.
func (s Sample) Bounds() (min float64, max float64) {
	if len(s.Xs) == 0 || !s.Sorted {
		return Bounds(s.Xs)
	}
	return s.Xs[0], s.Xs[len(s.Xs)-1]
}

// vecSum returns the sum of xs.
func vecSum(xs []float64) float64 {
	sum := 0.0
	for _, x := range xs {
		sum += x
	}
	return sum
}

// Sum returns the sum of the Sample.
func (s Sample) Sum() float64 {
	return vecSum(s.Xs)
}

// Weight returns the total weight of the Sample. Since samples are
// unweighted, this is simply the number of values.
func (s Sample) Weight() float64 {
	return float64(len(s.Xs))
}

// Mean returns the arithmetic mean of xs.
func Mean(xs []float64) float64 {
	if len(xs) == 0 {
		return math.NaN()
	}
	// Incremental mean update; more numerically stable than sum/len.
	m := 0.0
	for i, x := range xs {
		m += (x - m) / float64(i+1)
	}
	return m
}

// Mean returns the arithmetic mean of the Sample.
func (s Sample) Mean() float64 {
	return Mean(s.Xs)
}

// GeoMean returns the geometric mean of xs. xs must be positive.
func GeoMean(xs []float64) float64 {
	if len(xs) == 0 {
		return math.NaN()
	}
	// Incremental mean of the logarithms, exponentiated at the end.
	m := 0.0
	for i, x := range xs {
		if x <= 0 {
			return math.NaN()
		}
		lx := math.Log(x)
		m += (lx - m) / float64(i+1)
	}
	return math.Exp(m)
}

// GeoMean returns the geometric mean of the Sample. All sample
// values must be positive.
func (s Sample) GeoMean() float64 {
	return GeoMean(s.Xs)
}

// Variance returns the sample variance of xs.
func Variance(xs []float64) float64 {
	if len(xs) == 0 {
		return math.NaN()
	} else if len(xs) <= 1 {
		return 0
	}

	// Based on Wikipedia's presentation of Welford 1962
	// (http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Online_algorithm).
	// This is more numerically stable than the standard two-pass
	// formula and not prone to massive cancellation.
	mean, M2 := 0.0, 0.0
	for n, x := range xs {
		delta := x - mean
		mean += delta / float64(n+1)
		M2 += delta * (x - mean)
	}
	return M2 / float64(len(xs)-1)
}

// Variance returns the sample variance of the Sample.
func (s Sample) Variance() float64 {
	return Variance(s.Xs)
}

// StdDev returns the sample standard deviation of xs.
func StdDev(xs []float64) float64 {
	return math.Sqrt(Variance(xs))
}

// StdDev returns the sample standard deviation of the Sample.
func (s Sample) StdDev() float64 {
	return StdDev(s.Xs)
}

// Percentile returns the pctileth value from the Sample. This uses
// interpolation method R8 from Hyndman and Fan (1996).
//
// pctile will be capped to the range [0, 1]. If len(s.Xs) == 0,
// returns NaN.
//
// Percentile(0.5) is the median. Percentile(0.25) and
// Percentile(0.75) are the first and third quartiles, respectively.
//
// This is constant time if s.Sorted; otherwise it sorts s in place.
func (s *Sample) Percentile(pctile float64) float64 {
	if len(s.Xs) == 0 {
		return math.NaN()
	} else if pctile <= 0 {
		min, _ := s.Bounds()
		return min
	} else if pctile >= 1 {
		_, max := s.Bounds()
		return max
	}

	if !s.Sorted {
		s.Sort()
	}

	N := float64(len(s.Xs))
	//n := pctile * (N + 1) // R6
	n := 1/3.0 + pctile*(N+1/3.0) // R8
	kf, frac := math.Modf(n)
	k := int(kf)
	if k <= 0 {
		return s.Xs[0]
	} else if k >= len(s.Xs) {
		return s.Xs[len(s.Xs)-1]
	}
	// Linearly interpolate between the two adjacent order statistics.
	return s.Xs[k-1] + frac*(s.Xs[k]-s.Xs[k-1])
}

// IQR returns the interquartile range of the Sample.
//
// This is constant time if s.Sorted; otherwise it operates on a sorted
// copy so the receiver's data is left in its original order.
func (s Sample) IQR() float64 {
	if !s.Sorted {
		s = *s.Copy().Sort()
	}
	return s.Percentile(0.75) - s.Percentile(0.25)
}

// Sort sorts the samples in place in s and returns s.
//
// A sorted sample improves the performance of some algorithms.
func (s *Sample) Sort() *Sample {
	// Skip the sort when the data is already known (or found) to be sorted.
	if !s.Sorted && !sort.Float64sAreSorted(s.Xs) {
		sort.Float64s(s.Xs)
	}
	s.Sorted = true
	return s
}

// Copy returns a copy of the Sample.
//
// The returned Sample shares no data with the original, so they can
// be modified (for example, sorted) independently.
func (s Sample) Copy() *Sample {
	xs := make([]float64, len(s.Xs))
	copy(xs, s.Xs)
	return &Sample{xs, s.Sorted}
}

// FilterOutliers updates this sample in-place by removing all the values
// that are outliers, i.e. values outside 1.5×IQR of the quartiles
// (Tukey's fences).
func (s *Sample) FilterOutliers() {
	// Discard outliers.
	q1, q3 := s.Percentile(0.25), s.Percentile(0.75)
	lo, hi := q1-1.5*(q3-q1), q3+1.5*(q3-q1)
	nn := 0
	for _, value := range s.Xs {
		if lo <= value && value <= hi {
			s.Xs[nn] = value
			nn++
		}
	}
	s.Xs = s.Xs[:nn]
}

// Clear resets this sample so it contains 0 values.
func (s *Sample) Clear() {
	s.Xs = s.Xs[:0]
	s.Sorted = false
}
+ +package mathstats + +import "testing" + +func TestSamplePercentile(t *testing.T) { + s := Sample{Xs: []float64{15, 20, 35, 40, 50}} + testFunc(t, "Percentile", s.Percentile, map[float64]float64{ + -1: 15, + 0: 15, + .05: 15, + .30: 19.666666666666666, + .40: 27, + .95: 50, + 1: 50, + 2: 50, + }) +} diff --git a/go/mathstats/tdist.go b/go/mathstats/tdist.go new file mode 100644 index 00000000000..5376669f32e --- /dev/null +++ b/go/mathstats/tdist.go @@ -0,0 +1,33 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathstats + +import "math" + +// A TDist is a Student's t-distribution with V degrees of freedom. +type TDist struct { + V float64 +} + +func (t TDist) PDF(x float64) float64 { + return math.Exp(lgamma((t.V+1)/2)-lgamma(t.V/2)) / + math.Sqrt(t.V*math.Pi) * math.Pow(1+(x*x)/t.V, -(t.V+1)/2) +} + +func (t TDist) CDF(x float64) float64 { + if x == 0 { + return 0.5 + } else if x > 0 { + return 1 - 0.5*mathBetaInc(t.V/(t.V+x*x), t.V/2, 0.5) + } else if x < 0 { + return 1 - t.CDF(-x) + } else { + return math.NaN() + } +} + +func (t TDist) Bounds() (float64, float64) { + return -4, 4 +} diff --git a/go/mathstats/tdist_test.go b/go/mathstats/tdist_test.go new file mode 100644 index 00000000000..b30ba95662b --- /dev/null +++ b/go/mathstats/tdist_test.go @@ -0,0 +1,95 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package mathstats + +import "testing" + +func TestT(t *testing.T) { + testFunc(t, "PDF(%v|v=1)", TDist{1}.PDF, map[float64]float64{ + -10: 0.0031515830315226806, + -9: 0.0038818278802901312, + -8: 0.0048970751720583188, + -7: 0.0063661977236758151, + -6: 0.0086029698968592104, + -5: 0.012242687930145799, + -4: 0.018724110951987692, + -3: 0.031830988618379075, + -2: 0.063661977236758149, + -1: 0.15915494309189537, + 0: 0.31830988618379075, + 1: 0.15915494309189537, + 2: 0.063661977236758149, + 3: 0.031830988618379075, + 4: 0.018724110951987692, + 5: 0.012242687930145799, + 6: 0.0086029698968592104, + 7: 0.0063661977236758151, + 8: 0.0048970751720583188, + 9: 0.0038818278802901312}) + testFunc(t, "PDF(%v|v=5)", TDist{5}.PDF, map[float64]float64{ + -10: 4.0989816415343313e-05, + -9: 7.4601664362590413e-05, + -8: 0.00014444303269563934, + -7: 0.00030134402928803911, + -6: 0.00068848154013743002, + -5: 0.0017574383788078445, + -4: 0.0051237270519179133, + -3: 0.017292578800222964, + -2: 0.065090310326216455, + -1: 0.21967979735098059, + 0: 0.3796066898224944, + 1: 0.21967979735098059, + 2: 0.065090310326216455, + 3: 0.017292578800222964, + 4: 0.0051237270519179133, + 5: 0.0017574383788078445, + 6: 0.00068848154013743002, + 7: 0.00030134402928803911, + 8: 0.00014444303269563934, + 9: 7.4601664362590413e-05}) + + testFunc(t, "CDF(%v|v=1)", TDist{1}.CDF, map[float64]float64{ + -10: 0.03172551743055356, + -9: 0.035223287477277272, + -8: 0.039583424160565539, + -7: 0.045167235300866547, + -6: 0.052568456711253424, + -5: 0.06283295818900117, + -4: 0.077979130377369324, + -3: 0.10241638234956672, + -2: 0.14758361765043321, + -1: 0.24999999999999978, + 0: 0.5, + 1: 0.75000000000000022, + 2: 0.85241638234956674, + 3: 0.89758361765043326, + 4: 0.92202086962263075, + 5: 0.93716704181099886, + 6: 0.94743154328874657, + 7: 0.95483276469913347, + 8: 0.96041657583943452, + 9: 0.96477671252272279}) + testFunc(t, "CDF(%v|v=5)", TDist{5}.CDF, map[float64]float64{ + -10: 
// A LocationHypothesis specifies the alternative hypothesis of a
// location test such as a t-test or a Mann-Whitney U-test. The
// default (zero) value is to test against the alternative hypothesis
// that they differ.
type LocationHypothesis int

const (
	// LocationLess specifies the alternative hypothesis that the
	// location of the first sample is less than the second. This
	// is a one-tailed test.
	LocationLess LocationHypothesis = -1

	// LocationDiffers specifies the alternative hypothesis that
	// the locations of the two samples are not equal. This is a
	// two-tailed test.
	LocationDiffers LocationHypothesis = 0

	// LocationGreater specifies the alternative hypothesis that
	// the location of the first sample is greater than the
	// second. This is a one-tailed test.
	LocationGreater LocationHypothesis = 1
)

// A TTestResult is the result of a t-test.
type TTestResult struct {
	// N1 and N2 are the sizes of the input samples. For a
	// one-sample t-test, N2 is 0.
	N1, N2 int

	// T is the value of the t-statistic for this t-test.
	T float64

	// DoF is the degrees of freedom for this t-test.
	DoF float64

	// AltHypothesis specifies the alternative hypothesis tested
	// by this test against the null hypothesis that there is no
	// difference in the means of the samples.
	AltHypothesis LocationHypothesis

	// P is p-value for this t-test for the given null hypothesis.
	P float64
}

// newTTestResult packages a computed t-statistic into a TTestResult,
// deriving the p-value from the CDF of the t-distribution with dof
// degrees of freedom according to the requested alternative hypothesis.
func newTTestResult(n1, n2 int, t, dof float64, alt LocationHypothesis) *TTestResult {
	dist := TDist{dof}
	var p float64
	switch alt {
	case LocationDiffers:
		// Two-tailed: double the one-sided tail probability of |t|.
		p = 2 * (1 - dist.CDF(math.Abs(t)))
	case LocationLess:
		p = dist.CDF(t)
	case LocationGreater:
		p = 1 - dist.CDF(t)
	}
	// NOTE(review): any other alt value leaves P at 0; presumably callers
	// only ever pass the three defined constants — TODO confirm.
	return &TTestResult{N1: n1, N2: n2, T: t, DoF: dof, AltHypothesis: alt, P: p}
}

// A TTestSample is a sample that can be used for a one or two sample
// t-test.
type TTestSample interface {
	Weight() float64
	Mean() float64
	Variance() float64
}

var (
	// ErrSampleSize is returned when a sample has too few values for the
	// requested test.
	ErrSampleSize = errors.New("sample is too small")
	// ErrZeroVariance is returned when a test cannot be performed because
	// the sample(s) have zero variance.
	ErrZeroVariance = errors.New("sample has zero variance")
	// ErrMismatchedSamples is returned by PairedTTest when the two
	// samples have different lengths.
	ErrMismatchedSamples = errors.New("samples have different lengths")
)

// TwoSampleTTest performs a two-sample (unpaired) Student's t-test on
// samples x1 and x2. This is a test of the null hypothesis that x1
// and x2 are drawn from populations with equal means. It assumes x1
// and x2 are independent samples, that the distributions have equal
// variance, and that the populations are normally distributed.
func TwoSampleTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {
	n1, n2 := x1.Weight(), x2.Weight()
	if n1 == 0 || n2 == 0 {
		return nil, ErrSampleSize
	}
	v1, v2 := x1.Variance(), x2.Variance()
	if v1 == 0 && v2 == 0 {
		return nil, ErrZeroVariance
	}

	dof := n1 + n2 - 2
	// Pooled variance, weighting each sample by its degrees of freedom.
	v12 := ((n1-1)*v1 + (n2-1)*v2) / dof
	t := (x1.Mean() - x2.Mean()) / math.Sqrt(v12*(1/n1+1/n2))
	return newTTestResult(int(n1), int(n2), t, dof, alt), nil
}

// TwoSampleWelchTTest performs a two-sample (unpaired) Welch's t-test
// on samples x1 and x2. This is like TwoSampleTTest, but does not
// assume the distributions have equal variance.
func TwoSampleWelchTTest(x1, x2 TTestSample, alt LocationHypothesis) (*TTestResult, error) {
	n1, n2 := x1.Weight(), x2.Weight()
	if n1 <= 1 || n2 <= 1 {
		// TODO: Can we still do this with n == 1?
		return nil, ErrSampleSize
	}
	v1, v2 := x1.Variance(), x2.Variance()
	if v1 == 0 && v2 == 0 {
		return nil, ErrZeroVariance
	}

	// Welch-Satterthwaite approximation of the degrees of freedom.
	dof := math.Pow(v1/n1+v2/n2, 2) /
		(math.Pow(v1/n1, 2)/(n1-1) + math.Pow(v2/n2, 2)/(n2-1))
	s := math.Sqrt(v1/n1 + v2/n2)
	t := (x1.Mean() - x2.Mean()) / s
	return newTTestResult(int(n1), int(n2), t, dof, alt), nil
}

// PairedTTest performs a two-sample paired t-test on samples x1 and
// x2. If μ0 is non-zero, this tests if the average of the difference
// is significantly different from μ0. If x1 and x2 are identical,
// this returns a nil result with ErrZeroVariance.
func PairedTTest(x1, x2 []float64, μ0 float64, alt LocationHypothesis) (*TTestResult, error) {
	if len(x1) != len(x2) {
		return nil, ErrMismatchedSamples
	}
	if len(x1) <= 1 {
		// TODO: Can we still do this with n == 1?
		return nil, ErrSampleSize
	}

	dof := float64(len(x1) - 1)

	// Reduce to a one-sample test on the element-wise differences.
	diff := make([]float64, len(x1))
	for i := range x1 {
		diff[i] = x1[i] - x2[i]
	}
	sd := StdDev(diff)
	if sd == 0 {
		// TODO: Can we still do the test?
		return nil, ErrZeroVariance
	}
	t := (Mean(diff) - μ0) * math.Sqrt(float64(len(x1))) / sd
	return newTTestResult(len(x1), len(x2), t, dof, alt), nil
}

// OneSampleTTest performs a one-sample t-test on sample x. This tests
// the null hypothesis that the population mean is equal to μ0. This
// assumes the distribution of the population of sample means is
// normal.
func OneSampleTTest(x TTestSample, μ0 float64, alt LocationHypothesis) (*TTestResult, error) {
	n, v := x.Weight(), x.Variance()
	if n == 0 {
		return nil, ErrSampleSize
	}
	if v == 0 {
		// TODO: Can we still do the test?
		return nil, ErrZeroVariance
	}
	dof := n - 1
	t := (x.Mean() - μ0) * math.Sqrt(n) / math.Sqrt(v)
	return newTTestResult(int(n), 0, t, dof, alt), nil
}
+ +package mathstats + +import "testing" + +func TestTTest(t *testing.T) { + s1 := Sample{Xs: []float64{2, 1, 3, 4}} + s2 := Sample{Xs: []float64{6, 5, 7, 9}} + + check := func(want, got *TTestResult) { + if want.N1 != got.N1 || want.N2 != got.N2 || + !aeq(want.T, got.T) || !aeq(want.DoF, got.DoF) || + want.AltHypothesis != got.AltHypothesis || + !aeq(want.P, got.P) { + t.Errorf("want %+v, got %+v", want, got) + } + } + check3 := func(test func(alt LocationHypothesis) (*TTestResult, error), n1, n2 int, t, dof float64, pless, pdiff, pgreater float64) { + want := &TTestResult{N1: n1, N2: n2, T: t, DoF: dof} + + want.AltHypothesis = LocationLess + want.P = pless + got, _ := test(want.AltHypothesis) + check(want, got) + + want.AltHypothesis = LocationDiffers + want.P = pdiff + got, _ = test(want.AltHypothesis) + check(want, got) + + want.AltHypothesis = LocationGreater + want.P = pgreater + got, _ = test(want.AltHypothesis) + check(want, got) + } + + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return TwoSampleTTest(s1, s1, alt) + }, 4, 4, 0, 6, + 0.5, 1, 0.5) + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return TwoSampleWelchTTest(s1, s1, alt) + }, 4, 4, 0, 6, + 0.5, 1, 0.5) + + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return TwoSampleTTest(s1, s2, alt) + }, 4, 4, -3.9703446152237674, 6, + 0.0036820296121056195, 0.0073640592242113214, 0.9963179703878944) + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return TwoSampleWelchTTest(s1, s2, alt) + }, 4, 4, -3.9703446152237674, 5.584615384615385, + 0.004256431565689112, 0.0085128631313781695, 0.9957435684343109) + + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return PairedTTest(s1.Xs, s2.Xs, 0, alt) + }, 4, 4, -17, 3, + 0.0002216717691559955, 0.00044334353831207749, 0.999778328230844) + + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return OneSampleTTest(s1, 0, alt) + }, 4, 0, 3.872983346207417, 3, + 
0.9847668541689145, 0.030466291662170977, 0.015233145831085482) + check3(func(alt LocationHypothesis) (*TTestResult, error) { + return OneSampleTTest(s1, 2.5, alt) + }, 4, 0, 0, 3, + 0.5, 1, 0.5) +} diff --git a/go/mathstats/util_test.go b/go/mathstats/util_test.go new file mode 100644 index 00000000000..68fac8488f4 --- /dev/null +++ b/go/mathstats/util_test.go @@ -0,0 +1,44 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mathstats + +import ( + "fmt" + "math" + "sort" + "strings" + "testing" +) + +// aeq returns true if expect and got are equal to 8 significant +// figures (1 part in 100 million). +func aeq(expect, got float64) bool { + if expect < 0 && got < 0 { + expect, got = -expect, -got + } + return expect*0.99999999 <= got && got*0.99999999 <= expect +} + +func testFunc(t *testing.T, name string, f func(float64) float64, vals map[float64]float64) { + xs := make([]float64, 0, len(vals)) + for x := range vals { + xs = append(xs, x) + } + sort.Float64s(xs) + + for _, x := range xs { + want, got := vals[x], f(x) + if math.IsNaN(want) && math.IsNaN(got) || aeq(want, got) { + continue + } + var label string + if strings.Contains(name, "%v") { + label = fmt.Sprintf(name, x) + } else { + label = fmt.Sprintf("%s(%v)", name, x) + } + t.Errorf("want %s=%v, got %v", label, want, got) + } +} diff --git a/go/mysql/binlog_event_json.go b/go/mysql/binlog_event_json.go index a55762ea8f8..efc5648f326 100644 --- a/go/mysql/binlog_event_json.go +++ b/go/mysql/binlog_event_json.go @@ -21,6 +21,8 @@ import ( "fmt" "math" + "vitess.io/vitess/go/vt/log" + "vitess.io/vitess/go/vt/vtgate/evalengine" "github.com/spyzhov/ajson" @@ -28,6 +30,22 @@ import ( querypb "vitess.io/vitess/go/vt/proto/query" ) +/* + +References: + +* C source of mysql json data type implementation +https://fossies.org/linux/mysql/sql/json_binary.cc + +* nice description of MySQL's 
json representation +https://lafengnan.gitbooks.io/blog/content/mysql/chapter2.html + +* java/python connector links: useful for test cases and reverse engineering +https://github.com/shyiko/mysql-binlog-connector-java/pull/119/files +https://github.com/noplay/python-mysql-replication/blob/175df28cc8b536a68522ff9b09dc5440adad6094/pymysqlreplication/packet.py + +*/ + //region debug-only //TODO remove once the json refactor is tested live var jsonDebug = false @@ -36,7 +54,7 @@ func jlog(tpl string, vals ...interface{}) { if !jsonDebug { return } - fmt.Printf(tpl+"\n", vals...) + log.Infof("JSON:"+tpl+"\n", vals...) _ = printASCIIBytes } @@ -44,15 +62,41 @@ func printASCIIBytes(data []byte) { if !jsonDebug { return } - fmt.Printf("\n\n%v\n[", data) + s := "" for _, c := range data { if c < 127 && c > 32 { - fmt.Printf("%c ", c) + s += fmt.Sprintf("%c ", c) } else { - fmt.Printf("%02d ", c) + s += fmt.Sprintf("%02d ", c) } } - fmt.Printf("]\n") + log.Infof("[%s]", s) +} + +// only used for logging/debugging +var jsonTypeToName = map[uint]string{ + jsonSmallObject: "sObject", + jsonLargeObject: "lObject", + jsonSmallArray: "sArray", + jsonLargeArray: "lArray", + jsonLiteral: "literal", + jsonInt16: "int16", + jsonUint16: "uint16", + jsonInt32: "int32", + jsonUint32: "uint32", + jsonInt64: "int64", + jsonUint64: "uint64", + jsonDouble: "double", //0x0b + jsonString: "string", //0x0c a utf8mb4 string + jsonOpaque: "opaque", //0x0f "custom" data +} + +func jsonDataTypeToString(typ uint) string { + sType, ok := jsonTypeToName[typ] + if !ok { + return "undefined" + } + return sType } //endregion @@ -64,7 +108,7 @@ func getJSONValue(data []byte) (string, error) { if len(data) == 0 { ast = ajson.NullNode("") } else { - ast, _, err = binlogJSON.parse(data) + ast, err = binlogJSON.parse(data) if err != nil { return "", err } @@ -86,27 +130,32 @@ func init() { //region plugin manager -// BinlogJSON contains the plugins for all json types and methods for parsing the binary json 
representation from the binlog +// BinlogJSON contains the plugins for all json types and methods for parsing the +// binary json representation of a specific type from the binlog type BinlogJSON struct { plugins map[jsonDataType]jsonPlugin } -func (jh *BinlogJSON) parse(data []byte) (node *ajson.Node, newPos int, err error) { - var pos int - typ := data[0] +// parse decodes a value from the binlog +func (jh *BinlogJSON) parse(data []byte) (node *ajson.Node, err error) { + // pos keeps track of the offset of the current node being parsed + pos := 0 + typ := data[pos] jlog("Top level object is type %s\n", jsonDataTypeToString(uint(typ))) pos++ return jh.getNode(jsonDataType(typ), data, pos) } +// each plugin registers itself in init()s func (jh *BinlogJSON) register(typ jsonDataType, Plugin jsonPlugin) { jh.plugins[typ] = Plugin } -func (jh *BinlogJSON) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { +// gets the node at this position +func (jh *BinlogJSON) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { Plugin := jh.plugins[typ] if Plugin == nil { - return nil, 0, fmt.Errorf("Plugin not found for type %d", typ) + return nil, fmt.Errorf("Plugin not found for type %d", typ) } return Plugin.getNode(typ, data, pos) } @@ -115,9 +164,9 @@ func (jh *BinlogJSON) getNode(typ jsonDataType, data []byte, pos int) (node *ajs //region enums -// jsonDataType has the values used in the mysql json binary representation to denote types -// we have string, literal(true/false/null), number, object or array types -// large object => doc size > 64K, you get pointers instead of inline values +// jsonDataType has the values used in the mysql json binary representation to denote types. +// We have string, literal(true/false/null), number, object or array types. +// large object => doc size > 64K: you get pointers instead of inline values. 
type jsonDataType byte // type mapping as defined by the mysql json representation @@ -138,79 +187,43 @@ const ( jsonOpaque = 15 //0x0f "custom" data ) -func jsonDataTypeToString(typ uint) string { - switch typ { - case jsonSmallObject: - return "sObject" - case jsonLargeObject: - return "lObject" - case jsonSmallArray: - return "sArray" - case jsonLargeArray: - return "lArray" - case jsonLiteral: - return "literal" - case jsonInt16: - return "int16" - case jsonUint16: - return "uint16" - case jsonInt32: - return "int32" - case jsonUint32: - return "uint32" - case jsonInt64: - return "int64" - case jsonUint64: - return "uint64" - case jsonDouble: - return "double" - case jsonString: - return "string" - case jsonOpaque: - return "opaque" - default: - return "undefined" - } -} - // literals in the binary json format can be one of three types: null, true, false type jsonDataLiteral byte -// this is how mysql maps the three literals (null, true and false) in the binlog +// this is how mysql maps the three literals in the binlog const ( jsonNullLiteral = '\x00' jsonTrueLiteral = '\x01' jsonFalseLiteral = '\x02' ) -// in objects and arrays some values are inlined, others have offsets into the raw data -var inlineTypes = map[jsonDataType]bool{ - jsonSmallObject: false, - jsonLargeObject: false, - jsonSmallArray: false, - jsonLargeArray: false, - jsonLiteral: true, - jsonInt16: true, - jsonUint16: true, - jsonInt32: false, - jsonUint32: false, - jsonInt64: false, - jsonUint64: false, - jsonDouble: false, - jsonString: false, - jsonOpaque: false, -} - //endregion //region util funcs -// readInt returns either 32-bit or a 16-bit int from the passed buffer. Which one it is, depends on whether the document is "large" or not +// in objects and arrays some values are inlined, other types have offsets into the raw data. +// literals (true/false/null) and 16bit integers are always inlined. +// for large documents 32bit integers are also inlined. 
+// principle is that two byte values are inlined in "small", and four byte in "large" docs +func isInline(typ jsonDataType, large bool) bool { + switch typ { + case jsonLiteral, jsonInt16, jsonUint16: + return true + case jsonInt32, jsonUint32: + if large { + return true + } + } + return false +} + +// readInt returns either a 32-bit or a 16-bit int from the passed buffer. Which one it is, +// depends on whether the document is "large" or not. // JSON documents stored are considered "large" if the size of the stored json document is -// more than 64K bytes. For a large document all types which have their inlineTypes entry as true -// are inlined. Others only store the offset in the document -// This int is either an offset into the raw data, count of elements or size of the represented data structure -// (This design decision allows a fixed number of bytes to be used for representing object keys and array entries) +// more than 64K bytes. Values of non-inlined types are stored as offsets into the document. +// The int returned is either an (i) offset into the raw data, (ii) count of elements, or (iii) size of the represented data structure. +// (This design decision allows a fixed number of bytes to be used for representing object keys and array indices.) +// readInt also returns the new position (by advancing the position by the number of bytes read). func readInt(data []byte, pos int, large bool) (int, int) { if large { return int(data[pos]) + @@ -227,29 +240,64 @@ func readInt(data []byte, pos int, large bool) (int, int) { // of an arbitrarily long string as implemented by the mysql server // https://github.com/mysql/mysql-server/blob/5.7/sql/json_binary.cc#L234 // https://github.com/mysql/mysql-server/blob/8.0/sql/json_binary.cc#L283 +// readVariableLength also returns the new position (by advancing the position by the number of bytes read). 
func readVariableLength(data []byte, pos int) (int, int) { var bb byte - var res int + var length int var idx byte for { bb = data[pos] pos++ - res |= int(bb&0x7f) << (7 * idx) - // if the high bit is 1, the integer value of the byte will be negative - // high bit of 1 signifies that the next byte is part of the length encoding + length |= int(bb&0x7f) << (7 * idx) + // if the high bit is 1, the integer value of the byte will be negative. + // high bit of 1 signifies that the next byte is part of the length encoding. if int8(bb) >= 0 { break } idx++ } - return res, pos + return length, pos +} + +// getElem returns the json value found inside json objects and arrays at the provided position +func getElem(data []byte, pos int, large bool) (*ajson.Node, int, error) { + var elem *ajson.Node + var err error + var offset int + typ := jsonDataType(data[pos]) + pos++ + if isInline(typ, large) { + elem, err = binlogJSON.getNode(typ, data, pos) + if err != nil { + return nil, 0, err + } + if large { + pos += 4 + } else { + pos += 2 + } + } else { + offset, pos = readInt(data, pos, large) + if offset >= len(data) { // consistency check, should only come here is there is a bug in the code + log.Errorf("unable to decode element") + return nil, 0, fmt.Errorf("unable to decode element: %+v", data) + } + newData := data[offset:] + //newPos ignored because this is an offset into the "extra" section of the buffer + elem, err = binlogJSON.getNode(typ, newData, 1) + if err != nil { + return nil, 0, err + } + } + return elem, pos, nil } //endregion // json sub-type interface +// one plugin for each sub-type, plugins are stateless and initialized on load via individual init() functions type jsonPlugin interface { - getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) + getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) } type jsonPluginInfo struct { @@ -270,14 +318,13 @@ type intPlugin struct { var _ jsonPlugin = 
(*intPlugin)(nil) -func (ih intPlugin) getVal(typ jsonDataType, data []byte, pos int) (value float64, newPos int) { +func (ih intPlugin) getVal(typ jsonDataType, data []byte, pos int) (value float64) { var val uint64 var val2 float64 size := ih.sizes[typ] for i := 0; i < size; i++ { val = val + uint64(data[pos+i])<<(8*i) } - pos += size switch typ { case jsonInt16: val2 = float64(int16(val)) @@ -294,13 +341,13 @@ func (ih intPlugin) getVal(typ jsonDataType, data []byte, pos int) (value float6 case jsonDouble: val2 = math.Float64frombits(val) } - return val2, pos + return val2 } -func (ih intPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { - val, pos := ih.getVal(typ, data, pos) +func (ih intPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { + val := ih.getVal(typ, data, pos) node = ajson.NumericNode("", val) - return node, pos, nil + return node, nil } func newIntPlugin() *intPlugin { @@ -340,9 +387,8 @@ type literalPlugin struct { var _ jsonPlugin = (*literalPlugin)(nil) -func (lh literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { +func (lh literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { val := jsonDataLiteral(data[pos]) - pos += 2 switch val { case jsonNullLiteral: node = ajson.NullNode("") @@ -351,9 +397,9 @@ func (lh literalPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *a case jsonFalseLiteral: node = ajson.BoolNode("", false) default: - return nil, 0, fmt.Errorf("unknown literal value %v", val) + return nil, fmt.Errorf("unknown literal value %v", val) } - return node, pos, nil + return node, nil } func newLiteralPlugin() *literalPlugin { @@ -381,13 +427,15 @@ type opaquePlugin struct { var _ jsonPlugin = (*opaquePlugin)(nil) -func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { +// other types 
are stored as catch-all opaque types: documentation on these is scarce. +// we currently know about (and support) date/time/datetime/decimal. +func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { dataType := data[pos] - pos++ - _, pos = readVariableLength(data, pos) + start := 3 // account for length of stored value + end := start + 8 // all currently supported opaque data types are 8 bytes in size switch dataType { case TypeDate: - raw := binary.LittleEndian.Uint64(data[3:11]) + raw := binary.LittleEndian.Uint64(data[start:end]) value := raw >> 24 yearMonth := (value >> 22) & 0x01ffff // 17 bits starting at 22nd year := yearMonth / 13 @@ -396,7 +444,7 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj dateString := fmt.Sprintf("%04d-%02d-%02d", year, month, day) node = ajson.StringNode("", dateString) case TypeTime: - raw := binary.LittleEndian.Uint64(data[3:11]) + raw := binary.LittleEndian.Uint64(data[start:end]) value := raw >> 24 hour := (value >> 12) & 0x03ff // 10 bits starting at 12th minute := (value >> 6) & 0x3f // 6 bits starting at 6th @@ -405,7 +453,7 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj timeString := fmt.Sprintf("%02d:%02d:%02d.%06d", hour, minute, second, microSeconds) node = ajson.StringNode("", timeString) case TypeDateTime: - raw := binary.LittleEndian.Uint64(data[3:11]) + raw := binary.LittleEndian.Uint64(data[start:end]) value := raw >> 24 yearMonth := (value >> 22) & 0x01ffff // 17 bits starting at 22nd year := yearMonth / 13 @@ -418,24 +466,23 @@ func (oh opaquePlugin) getNode(typ jsonDataType, data []byte, pos int) (node *aj timeString := fmt.Sprintf("%04d-%02d-%02d %02d:%02d:%02d.%06d", year, month, day, hour, minute, second, microSeconds) node = ajson.StringNode("", timeString) case TypeNewDecimal: - decimalData := data[3:11] + decimalData := data[start:end] precision := decimalData[0] scale := decimalData[1] 
metadata := (uint16(precision) << 8) + uint16(scale) val, _, err := CellValue(decimalData, 2, TypeNewDecimal, metadata, querypb.Type_DECIMAL) if err != nil { - return nil, 0, err + return nil, err } float, err := evalengine.ToFloat64(val) if err != nil { - return nil, 0, err + return nil, err } node = ajson.NumericNode("", float) default: - return nil, 0, fmt.Errorf("opaque type %d is not supported yet, data %v", dataType, data[2:]) + return nil, fmt.Errorf("opaque type %d is not supported yet, data %v", dataType, data[2:]) } - pos += 8 - return node, pos, nil + return node, nil } func newOpaquePlugin() *opaquePlugin { @@ -463,11 +510,11 @@ type stringPlugin struct { var _ jsonPlugin = (*stringPlugin)(nil) -func (sh stringPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { +func (sh stringPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { size, pos := readVariableLength(data, pos) node = ajson.StringNode("", string(data[pos:pos+size])) - return node, pos, nil + return node, nil } func newStringPlugin() *stringPlugin { @@ -495,37 +542,28 @@ type arrayPlugin struct { var _ jsonPlugin = (*arrayPlugin)(nil) -func (ah arrayPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { - //printAsciiBytes(data) +// arrays are stored thus: +// | type_identifier(one of [2,3]) | elem count | obj size | list of offsets+lengths of values | actual values | +func (ah arrayPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { + jlog("JSON Array %s, len %d", jsonDataTypeToString(uint(typ)), len(data)) var nodes []*ajson.Node var elem *ajson.Node - var elementCount, offset, size int + var elementCount, size int large := typ == jsonLargeArray elementCount, pos = readInt(data, pos, large) jlog("Array(%t): elem count: %d\n", large, elementCount) size, pos = readInt(data, pos, large) jlog("Array(%t): elem count: %d, size:%d\n", 
large, elementCount, size) for i := 0; i < elementCount; i++ { - typ = jsonDataType(data[pos]) - pos++ - if inlineTypes[typ] { - elem, pos, err = binlogJSON.getNode(typ, data, pos) - if err != nil { - return nil, 0, err - } - } else { - offset, pos = readInt(data, pos, large) - newData := data[offset:] - elem, _, err = binlogJSON.getNode(typ, newData, 1) //newPos ignored because this is an offset into the "extra" section of the buffer - if err != nil { - return nil, 0, err - } + elem, pos, err = getElem(data, pos, large) + if err != nil { + return nil, err } nodes = append(nodes, elem) - jlog("Index is %s:%s", i, jsonDataTypeToString(uint(typ))) + jlog("Index is %d:%s", i, jsonDataTypeToString(uint(typ))) } node = ajson.ArrayNode("", nodes) - return node, pos, nil + return node, nil } func newArrayPlugin() *arrayPlugin { @@ -554,47 +592,53 @@ type objectPlugin struct { var _ jsonPlugin = (*objectPlugin)(nil) -func (oh objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, newPos int, err error) { - jlog("JSON Type is %s", jsonDataTypeToString(uint(typ))) - //printAsciiBytes(data) - nodes := make(map[string]*ajson.Node) - var elem *ajson.Node - var elementCount, offset, size int +// objects are stored thus: +// | type_identifier(0/1) | elem count | obj size | list of offsets+lengths of keys | list of offsets+lengths of values | actual keys | actual values | +func (oh objectPlugin) getNode(typ jsonDataType, data []byte, pos int) (node *ajson.Node, err error) { + jlog("JSON Type is %s, len %d", jsonDataTypeToString(uint(typ)), len(data)) + + // "large" decides number of bytes used to specify element count and total object size: 4 bytes for large, 2 for small var large = typ == jsonLargeObject + + var elementCount int // total number of elements (== keys) in this object map. 
(element can be another object: recursively handled) + var size int // total size of object + elementCount, pos = readInt(data, pos, large) - jlog("Object: elem count: %d\n", elementCount) size, pos = readInt(data, pos, large) jlog("Object: elem count: %d, size %d\n", elementCount, size) - keys := make([]string, elementCount) + + keys := make([]string, elementCount) // stores all the keys in this object for i := 0; i < elementCount; i++ { - var keyOffset, keyLength int + var keyOffset int + var keyLength int keyOffset, pos = readInt(data, pos, large) keyLength, pos = readInt(data, pos, false) // keyLength is always a 16-bit int - keys[i] = string(data[keyOffset+1 : keyOffset+keyLength+1]) + + keyOffsetStart := keyOffset + 1 + // check that offsets are not out of bounds (can happen only if there is a bug in the parsing code) + if keyOffsetStart >= len(data) || keyOffsetStart+keyLength > len(data) { + log.Errorf("unable to decode object elements") + return nil, fmt.Errorf("unable to decode object elements: %v", data) + } + keys[i] = string(data[keyOffsetStart : keyOffsetStart+keyLength]) } + jlog("Object keys: %+v", keys) + + object := make(map[string]*ajson.Node) + var elem *ajson.Node + // get the value for each key for i := 0; i < elementCount; i++ { - typ = jsonDataType(data[pos]) - pos++ - if inlineTypes[typ] { - elem, pos, err = binlogJSON.getNode(typ, data, pos) - if err != nil { - return nil, 0, err - } - } else { - offset, pos = readInt(data, pos, large) - newData := data[offset:] - elem, _, err = binlogJSON.getNode(typ, newData, 1) //newPos ignored because this is an offset into the "extra" section of the buffer - if err != nil { - return nil, 0, err - } + elem, pos, err = getElem(data, pos, large) + if err != nil { + return nil, err } - nodes[keys[i]] = elem + object[keys[i]] = elem jlog("Key is %s:%s", keys[i], jsonDataTypeToString(uint(typ))) } - node = ajson.ObjectNode("", nodes) - return node, pos, nil + node = ajson.ObjectNode("", object) + return 
node, nil } func newObjectPlugin() *objectPlugin { @@ -610,19 +654,3 @@ func newObjectPlugin() *objectPlugin { } //endregion - -/* - -References: - -* C source of mysql json data type implementation -https://fossies.org/linux/mysql/sql/json_binary.cc - -* nice description of MySQL's json representation -https://lafengnan.gitbooks.io/blog/content/mysql/chapter2.html - -* java/python connector links: useful for test cases and reverse engineering -https://github.com/shyiko/mysql-binlog-connector-java/pull/119/files -https://github.com/noplay/python-mysql-replication/blob/175df28cc8b536a68522ff9b09dc5440adad6094/pymysqlreplication/packet.py - -*/ diff --git a/go/mysql/binlog_event_test.go b/go/mysql/binlog_event_test.go index 3a35b958f9c..af276396930 100644 --- a/go/mysql/binlog_event_test.go +++ b/go/mysql/binlog_event_test.go @@ -17,6 +17,7 @@ limitations under the License. package mysql import ( + "strings" "testing" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" @@ -32,8 +33,9 @@ func TestQueryString(t *testing.T) { }, SQL: "sql", } - want := `{Database: "test_database", Charset: client:12 conn:34 server:56 , SQL: "sql"}` - if got := input.String(); got != want { + want := `{Database: "test_database", Charset:` + got := input.String() + if !strings.HasPrefix(got, want) { t.Errorf("%#v.String() = %#v, want %#v", input, got, want) } } diff --git a/go/mysql/conn.go b/go/mysql/conn.go index c2543ef9e01..edb1201afe3 100644 --- a/go/mysql/conn.go +++ b/go/mysql/conn.go @@ -129,6 +129,7 @@ type Conn struct { bufferedReader *bufio.Reader flushTimer *time.Timer + header [packetHeaderSize]byte // Keep track of how and of the buffer we allocated for an // ephemeral packet on the read and write sides. @@ -324,14 +325,13 @@ func (c *Conn) getReader() io.Reader { } func (c *Conn) readHeaderFrom(r io.Reader) (int, error) { - var header [packetHeaderSize]byte // Note io.ReadFull will return two different types of errors: // 1. 
if the socket is already closed, and the go runtime knows it, // then ReadFull will return an error (different than EOF), // something like 'read: connection reset by peer'. // 2. if the socket is not closed while we start the read, // but gets closed after the read is started, we'll get io.EOF. - if _, err := io.ReadFull(r, header[:]); err != nil { + if _, err := io.ReadFull(r, c.header[:]); err != nil { // The special casing of propagating io.EOF up // is used by the server side only, to suppress an error // message if a client just disconnects. @@ -344,14 +344,14 @@ func (c *Conn) readHeaderFrom(r io.Reader) (int, error) { return 0, vterrors.Wrapf(err, "io.ReadFull(header size) failed") } - sequence := uint8(header[3]) + sequence := uint8(c.header[3]) if sequence != c.sequence { return 0, vterrors.Errorf(vtrpc.Code_INTERNAL, "invalid sequence, expected %v got %v", c.sequence, sequence) } c.sequence++ - return int(uint32(header[0]) | uint32(header[1])<<8 | uint32(header[2])<<16), nil + return int(uint32(c.header[0]) | uint32(c.header[1])<<8 | uint32(c.header[2])<<16), nil } // readEphemeralPacket attempts to read a packet into buffer from sync.Pool. 
Do @@ -860,6 +860,9 @@ func (c *Conn) handleNextCommand(handler Handler) bool { } return false } + if len(data) == 0 { + return false + } switch data[0] { case ComQuit: @@ -897,7 +900,7 @@ func (c *Conn) handleNextCommand(handler Handler) bool { default: log.Errorf("Got unhandled packet (default) from %s, returning error: %v", c, data) c.recycleReadPacket() - if !c.writeErrorAndLog(ERUnknownComError, SSUnknownComError, "command handling not implemented yet: %v", data[0]) { + if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "command handling not implemented yet: %v", data[0]) { return false } } @@ -922,7 +925,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { c.recycleReadPacket() if !ok { log.Error("Got unhandled packet from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { return false } } @@ -930,7 +933,7 @@ func (c *Conn) handleComStmtReset(data []byte) bool { prepare, ok := c.PrepareData[stmtID] if !ok { log.Error("Commands were executed in an improper order from client %v, packet: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(CRCommandsOutOfSync, SSUnknownComError, "commands were executed in an improper order: %v", data) { + if !c.writeErrorAndLog(CRCommandsOutOfSync, SSNetError, "commands were executed in an improper order: %v", data) { return false } } @@ -1115,7 +1118,7 @@ func (c *Conn) handleComPrepare(handler Handler, data []byte) (kontinue bool) { _ = sqlparser.Walk(func(node sqlparser.SQLNode) (bool, error) { switch node := node.(type) { case sqlparser.Argument: - if strings.HasPrefix(string(node), ":v") { + if strings.HasPrefix(string(node), "v") { paramsCount++ } } @@ -1160,7 +1163,7 @@ func (c *Conn) handleComSetOption(data []byte) bool { c.Capabilities &^= CapabilityClientMultiStatements default: log.Errorf("Got unhandled packet 
(ComSetOption default) from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { return false } } @@ -1170,7 +1173,7 @@ func (c *Conn) handleComSetOption(data []byte) bool { } } else { log.Errorf("Got unhandled packet (ComSetOption else) from client %v, returning error: %v", c.ConnectionID, data) - if !c.writeErrorAndLog(ERUnknownComError, SSUnknownComError, "error handling packet: %v", data) { + if !c.writeErrorAndLog(ERUnknownComError, SSNetError, "error handling packet: %v", data) { return false } } @@ -1487,3 +1490,8 @@ func (c *Conn) GetTLSClientCerts() []*x509.Certificate { } return nil } + +// GetRawConn returns the raw net.Conn for nefarious purposes. +func (c *Conn) GetRawConn() net.Conn { + return c.conn +} diff --git a/go/mysql/conn_test.go b/go/mysql/conn_test.go index 668c86c1fea..b24a0ca4280 100644 --- a/go/mysql/conn_test.go +++ b/go/mysql/conn_test.go @@ -18,7 +18,9 @@ package mysql import ( "bytes" + "context" crypto_rand "crypto/rand" + "encoding/binary" "encoding/hex" "fmt" "math/rand" @@ -177,6 +179,17 @@ func verifyPacketComms(t *testing.T, cConn, sConn *Conn, data []byte) { } } +func TestRawConnection(t *testing.T) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + assert.IsType(t, &net.TCPConn{}, sConn.GetRawConn()) + assert.IsType(t, &net.TCPConn{}, cConn.GetRawConn()) +} + func TestPackets(t *testing.T) { listener, sConn, cConn := createSocketPair(t) defer func() { @@ -630,49 +643,6 @@ func TestConnectionErrorWhileWritingComStmtExecute(t *testing.T) { require.False(t, res, "we should beak the connection in case of error writing error packet") } -type testRun struct { - t *testing.T - err error -} - -func (t testRun) NewConnection(c *Conn) { - panic("implement me") -} - -func 
(t testRun) ConnectionClosed(c *Conn) { - panic("implement me") -} - -func (t testRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { - if strings.Contains(query, "error") { - return t.err - } - if strings.Contains(query, "panic") { - panic("test panic attack!") - } - if strings.Contains(query, "twice") { - callback(selectRowsResult) - } - callback(selectRowsResult) - return nil -} - -func (t testRun) ComPrepare(c *Conn, query string, bindVars map[string]*querypb.BindVariable) ([]*querypb.Field, error) { - panic("implement me") -} - -func (t testRun) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { - panic("implement me") -} - -func (t testRun) WarningCount(c *Conn) uint16 { - return 0 -} - -func (t testRun) ComResetConnection(c *Conn) { - panic("implement me") -} - var _ Handler = (*testRun)(nil) type testConn struct { @@ -735,3 +705,151 @@ func (m mockAddress) String() string { } var _ net.Addr = (*mockAddress)(nil) + +var letters = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + +func randSeq(n int) string { + b := make([]rune, n) + for i := range b { + b[i] = letters[rand.Intn(len(letters))] + } + return string(b) +} + +func TestPrepareAndExecute(t *testing.T) { + // this test starts a lot of clients that all send prepared statement parameter values + // and check that the handler received the correct input + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + for i := 0; i < 1000; i++ { + startGoRoutine(ctx, t, randSeq(i)) + } + + for { + select { + case <-ctx.Done(): + return + default: + if t.Failed() { + return + } + } + } +} + +func startGoRoutine(ctx context.Context, t *testing.T, s string) { + go func(longData string) { + listener, sConn, cConn := createSocketPair(t) + defer func() { + listener.Close() + sConn.Close() + cConn.Close() + }() + + sql := "SELECT * FROM test WHERE id = ?" 
+ mockData := preparePacket(t, sql) + + err := cConn.writePacket(mockData) + require.NoError(t, err) + + handler := &testRun{ + t: t, + expParamCounts: 1, + expQuery: sql, + expStmtID: 1, + } + + ok := sConn.handleNextCommand(handler) + require.True(t, ok, "oh noes") + + resp, err := cConn.ReadPacket() + require.NoError(t, err) + require.EqualValues(t, 0, resp[0]) + + for count := 0; ; count++ { + select { + case <-ctx.Done(): + return + default: + } + cConn.sequence = 0 + longDataPacket := createSendLongDataPacket(sConn.StatementID, 0, []byte(longData)) + err = cConn.writePacket(longDataPacket) + assert.NoError(t, err) + + assert.True(t, sConn.handleNextCommand(handler)) + data := sConn.PrepareData[sConn.StatementID] + assert.NotNil(t, data) + variable := data.BindVars["v1"] + assert.NotNil(t, variable, fmt.Sprintf("%#v", data.BindVars)) + assert.Equalf(t, []byte(longData), variable.Value[len(longData)*count:], "failed at: %d", count) + } + }(s) +} + +func createSendLongDataPacket(stmtID uint32, paramID uint16, data []byte) []byte { + stmtIDBinary := make([]byte, 4) + binary.LittleEndian.PutUint32(stmtIDBinary, stmtID) + + paramIDBinary := make([]byte, 2) + binary.LittleEndian.PutUint16(paramIDBinary, paramID) + + packet := []byte{0, 0, 0, 0, ComStmtSendLongData} + packet = append(packet, stmtIDBinary...) // append stmt ID + packet = append(packet, paramIDBinary...) // append param ID + packet = append(packet, data...) 
// append data + return packet +} + +type testRun struct { + t *testing.T + err error + expParamCounts int + expQuery string + expStmtID int +} + +func (t testRun) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { + panic("implement me") +} + +func (t testRun) NewConnection(c *Conn) { + panic("implement me") +} + +func (t testRun) ConnectionClosed(c *Conn) { + panic("implement me") +} + +func (t testRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { + if strings.Contains(query, "error") { + return t.err + } + if strings.Contains(query, "panic") { + panic("test panic attack!") + } + if strings.Contains(query, "twice") { + callback(selectRowsResult) + } + callback(selectRowsResult) + return nil +} + +func (t testRun) ComPrepare(c *Conn, query string, bv map[string]*querypb.BindVariable) ([]*querypb.Field, error) { + assert.Equal(t.t, t.expQuery, query) + assert.EqualValues(t.t, t.expStmtID, c.StatementID) + assert.NotNil(t.t, c.PrepareData[c.StatementID]) + assert.EqualValues(t.t, t.expParamCounts, c.PrepareData[c.StatementID].ParamsCount) + assert.Len(t.t, c.PrepareData, int(c.PrepareData[c.StatementID].ParamsCount)) + return nil, nil +} + +func (t testRun) WarningCount(c *Conn) uint16 { + return 0 +} + +func (t testRun) ComResetConnection(c *Conn) { + panic("implement me") +} + +var _ Handler = (*testRun)(nil) diff --git a/go/mysql/constants.go b/go/mysql/constants.go index 965714e08ca..726ec00b76e 100644 --- a/go/mysql/constants.go +++ b/go/mysql/constants.go @@ -16,7 +16,13 @@ limitations under the License. 
package mysql -import "strings" +import ( + "strings" + + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/simplifiedchinese" +) const ( // MaxPacketSize is the maximum payload length of a packet @@ -516,6 +522,9 @@ const ( ERDataTooLong = 1406 ERForbidSchemaChange = 1450 ERDataOutOfRange = 1690 + + // server not available + ERServerIsntAvailable = 3168 ) // Sql states for errors. @@ -527,9 +536,6 @@ const ( // in client.c. So using that one. SSUnknownSQLState = "HY000" - // SSUnknownComError is ER_UNKNOWN_COM_ERROR - SSUnknownComError = "08S01" - // SSNetError is network related error SSNetError = "08S01" @@ -626,6 +632,41 @@ var CharacterSetMap = map[string]uint8{ "eucjpms": 97, } +// CharacterSetEncoding maps a charset name to a golang encoder. +// golang does not support encoders for all MySQL charsets. +// A charset not in this map is unsupported. +// A trivial encoding (e.g. utf8) has a `nil` encoder +var CharacterSetEncoding = map[string]encoding.Encoding{ + "cp850": charmap.CodePage850, + "koi8r": charmap.KOI8R, + "latin1": charmap.Windows1252, + "latin2": charmap.ISO8859_2, + "ascii": nil, + "hebrew": charmap.ISO8859_8, + "greek": charmap.ISO8859_7, + "cp1250": charmap.Windows1250, + "gbk": simplifiedchinese.GBK, + "latin5": charmap.ISO8859_9, + "utf8": nil, + "cp866": charmap.CodePage866, + "cp852": charmap.CodePage852, + "latin7": charmap.ISO8859_13, + "utf8mb4": nil, + "cp1251": charmap.Windows1251, + "cp1256": charmap.Windows1256, + "cp1257": charmap.Windows1257, + "binary": nil, +} + +// ReverseCharacterSetMap maps a charset integer code to charset name +var ReverseCharacterSetMap = map[uint8]string{} + +func init() { + for c, i := range CharacterSetMap { + ReverseCharacterSetMap[i] = c + } +} + // IsNum returns true if a MySQL type is a numeric value. // It is the same as IS_NUM defined in mysql.h. 
func IsNum(typ uint8) bool { diff --git a/go/mysql/endtoend/query_test.go b/go/mysql/endtoend/query_test.go index 1827372b17c..0e3999060bf 100644 --- a/go/mysql/endtoend/query_test.go +++ b/go/mysql/endtoend/query_test.go @@ -220,7 +220,7 @@ func readRowsUsingStream(t *testing.T, conn *mysql.Conn, expectedCount int) { // Read the rows. count := 0 for { - row, err := conn.FetchNext() + row, err := conn.FetchNext(nil) if err != nil { t.Fatalf("FetchNext failed: %v", err) } diff --git a/go/mysql/endtoend/schema_change_test.go b/go/mysql/endtoend/schema_change_test.go new file mode 100644 index 00000000000..4ddcfbb94a9 --- /dev/null +++ b/go/mysql/endtoend/schema_change_test.go @@ -0,0 +1,140 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package endtoend + +import ( + "context" + "fmt" + "strings" + "testing" + + "vitess.io/vitess/go/vt/sqlparser" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" +) + +var ctx = context.Background() + +const ( + createDb = `create database if not exists _vt` + createUserTable = `create table vttest.product (id bigint(20) primary key, name char(10) CHARACTER SET utf8 COLLATE utf8_unicode_ci, created bigint(20))` + dropTestTable = `drop table if exists product` +) + +func TestChangeSchemaIsNoticed(t *testing.T) { + conn, err := mysql.Connect(ctx, &connParams) + require.NoError(t, err) + defer conn.Close() + + _, err = conn.ExecuteFetch(createDb, 1000, true) + require.NoError(t, err) + _, err = conn.ExecuteFetch(mysql.CreateSchemaCopyTable, 1000, true) + require.NoError(t, err) + + tests := []struct { + name string + changeQ string + }{{ + name: "add column", + changeQ: "alter table vttest.product add column phone VARCHAR(15)", + }, { + name: "rename column", + changeQ: "alter table vttest.product change name firstname char(10)", + }, { + name: "change column type", + changeQ: "alter table vttest.product change name name char(100)", + }, { + name: "remove column", + changeQ: "alter table vttest.product drop column name", + }, { + name: "remove last column", + changeQ: "alter table vttest.product drop column created", + }, { + name: "remove table", + changeQ: "drop table product", + }, { + name: "create table", + changeQ: `create table vttest.new_table (id bigint(20) primary key)`, + }, { + name: "change character set", + changeQ: "alter table vttest.product change name name char(10) CHARACTER SET utf8mb4", + }, { + name: "change collation", + changeQ: "alter table vttest.product change name name char(10) COLLATE utf8_unicode_520_ci", + }, { + name: "drop PK", + changeQ: "alter table vttest.product drop primary key", + }, { + name: "change PK", + changeQ: "alter table vttest.product drop primary key, add primary key (name)", + }, { + 
name: "two tables changes", + changeQ: "create table vttest.new_table2 (id bigint(20) primary key);alter table vttest.product drop column name", + }} + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + // reset schemacopy + _, err := conn.ExecuteFetch(mysql.ClearSchemaCopy, 1000, true) + require.NoError(t, err) + _, err = conn.ExecuteFetch(dropTestTable, 1000, true) + require.NoError(t, err) + _, err = conn.ExecuteFetch(createUserTable, 1000, true) + require.NoError(t, err) + rs, err := conn.ExecuteFetch(mysql.InsertIntoSchemaCopy, 1000, true) + require.NoError(t, err) + require.NotZero(t, rs.RowsAffected) + + // make sure no changes are detected + rs, err = conn.ExecuteFetch(mysql.DetectSchemaChange, 1000, true) + require.NoError(t, err) + require.Empty(t, rs.Rows) + + for _, q := range strings.Split(test.changeQ, ";") { + // make the schema change + _, err = conn.ExecuteFetch(q, 1000, true) + require.NoError(t, err) + } + + // make sure the change is detected + rs, err = conn.ExecuteFetch(mysql.DetectSchemaChange, 1000, true) + require.NoError(t, err) + require.NotEmpty(t, rs.Rows) + + var tables []string + for _, row := range rs.Rows { + apa := sqlparser.NewStrLiteral(row[0].ToString()) + tables = append(tables, "table_name = "+sqlparser.String(apa)) + } + tableNamePredicates := strings.Join(tables, " OR ") + del := fmt.Sprintf("%s WHERE %s", mysql.ClearSchemaCopy, tableNamePredicates) + upd := fmt.Sprintf("%s AND %s", mysql.InsertIntoSchemaCopy, tableNamePredicates) + + _, err = conn.ExecuteFetch(del, 1000, true) + require.NoError(t, err) + _, err = conn.ExecuteFetch(upd, 1000, true) + require.NoError(t, err) + + // make sure the change is detected + rs, err = conn.ExecuteFetch(mysql.DetectSchemaChange, 1000, true) + require.NoError(t, err) + require.Empty(t, rs.Rows) + }) + } +} diff --git a/go/mysql/flavor.go b/go/mysql/flavor.go index b178a5f0d5f..e0a22e1b966 100644 --- a/go/mysql/flavor.go +++ b/go/mysql/flavor.go @@ -34,8 +34,8 @@ 
var ( // Returned by ShowReplicationStatus(). ErrNotReplica = errors.New("no replication status") - // ErrNoMasterStatus means no status was returned by ShowMasterStatus(). - ErrNoMasterStatus = errors.New("no master status") + // ErrNoPrimaryStatus means no status was returned by ShowPrimaryStatus(). + ErrNoPrimaryStatus = errors.New("no master status") ) const ( @@ -57,8 +57,8 @@ const ( // 1. Oracle MySQL 5.6, 5.7, 8.0, ... // 2. MariaDB 10.X type flavor interface { - // masterGTIDSet returns the current GTIDSet of a server. - masterGTIDSet(c *Conn) (GTIDSet, error) + // primaryGTIDSet returns the current GTIDSet of a server. + primaryGTIDSet(c *Conn) (GTIDSet, error) // startReplicationCommand returns the command to start the replication. startReplicationCommand() string @@ -91,17 +91,17 @@ type flavor interface { // replication position at which the replica will resume. setReplicationPositionCommands(pos Position) []string - // changeMasterArg returns the specific parameter to add to - // a change master command. - changeMasterArg() string + // changeReplicationSourceArg returns the specific parameter to add to + // a "change master" command. + changeReplicationSourceArg() string // status returns the result of the appropriate status command, // with parsed replication position. status(c *Conn) (ReplicationStatus, error) - // masterStatus returns the result of 'SHOW MASTER STATUS', + // primaryStatus returns the result of 'SHOW MASTER STATUS', // with parsed executed position. - masterStatus(c *Conn) (MasterStatus, error) + primaryStatus(c *Conn) (PrimaryStatus, error) // waitUntilPositionCommand returns the SQL command to issue // to wait until the given position, until the context @@ -176,9 +176,9 @@ func (c *Conn) IsMariaDB() bool { return false } -// MasterPosition returns the current master replication position. 
-func (c *Conn) MasterPosition() (Position, error) { - gtidSet, err := c.flavor.masterGTIDSet(c) +// PrimaryPosition returns the current primary's replication position. +func (c *Conn) PrimaryPosition() (Position, error) { + gtidSet, err := c.flavor.primaryGTIDSet(c) if err != nil { return Position{}, err } @@ -187,10 +187,10 @@ func (c *Conn) MasterPosition() (Position, error) { }, nil } -// MasterFilePosition returns the current master's file based replication position. -func (c *Conn) MasterFilePosition() (Position, error) { +// PrimaryFilePosition returns the current primary's file based replication position. +func (c *Conn) PrimaryFilePosition() (Position, error) { filePosFlavor := filePosFlavor{} - gtidSet, err := filePosFlavor.masterGTIDSet(c) + gtidSet, err := filePosFlavor.primaryGTIDSet(c) if err != nil { return Position{}, err } @@ -245,22 +245,22 @@ func (c *Conn) ResetReplicationCommands() []string { // SetReplicationPositionCommands returns the commands to set the // replication position at which the replica will resume -// when it is later reparented with SetMasterCommands. +// when it is later reparented with SetReplicationSourceCommand. func (c *Conn) SetReplicationPositionCommands(pos Position) []string { return c.flavor.setReplicationPositionCommands(pos) } -// SetMasterCommand returns the command to use the provided master -// as the new master (without changing any GTID position). +// SetReplicationSourceCommand returns the command to use the provided host/port +// as the new replication source (without changing any GTID position). // It is guaranteed to be called with replication stopped. // It should not start or stop replication. 
-func (c *Conn) SetMasterCommand(params *ConnParams, masterHost string, masterPort int, masterConnectRetry int) string { +func (c *Conn) SetReplicationSourceCommand(params *ConnParams, host string, port int, connectRetry int) string { args := []string{ - fmt.Sprintf("MASTER_HOST = '%s'", masterHost), - fmt.Sprintf("MASTER_PORT = %d", masterPort), + fmt.Sprintf("MASTER_HOST = '%s'", host), + fmt.Sprintf("MASTER_PORT = %d", port), fmt.Sprintf("MASTER_USER = '%s'", params.Uname), fmt.Sprintf("MASTER_PASSWORD = '%s'", params.Pass), - fmt.Sprintf("MASTER_CONNECT_RETRY = %d", masterConnectRetry), + fmt.Sprintf("MASTER_CONNECT_RETRY = %d", connectRetry), } if params.SslEnabled() { args = append(args, "MASTER_SSL = 1") @@ -277,7 +277,7 @@ func (c *Conn) SetMasterCommand(params *ConnParams, masterHost string, masterPor if params.SslKey != "" { args = append(args, fmt.Sprintf("MASTER_SSL_KEY = '%s'", params.SslKey)) } - args = append(args, c.flavor.changeMasterArg()) + args = append(args, c.flavor.changeReplicationSourceArg()) return "CHANGE MASTER TO\n " + strings.Join(args, ",\n ") } @@ -350,9 +350,9 @@ func (c *Conn) ShowReplicationStatus() (ReplicationStatus, error) { return c.flavor.status(c) } -// parseMasterStatus parses the common fields of SHOW MASTER STATUS. -func parseMasterStatus(fields map[string]string) MasterStatus { - status := MasterStatus{} +// parsePrimaryStatus parses the common fields of SHOW MASTER STATUS. +func parsePrimaryStatus(fields map[string]string) PrimaryStatus { + status := PrimaryStatus{} fileExecPosStr := fields["Position"] file := fields["File"] @@ -369,10 +369,10 @@ func parseMasterStatus(fields map[string]string) MasterStatus { return status } -// ShowMasterStatus executes the right SHOW MASTER STATUS command, +// ShowPrimaryStatus executes the right SHOW MASTER STATUS command, // and returns a parsed executed Position, as well as file based Position. 
-func (c *Conn) ShowMasterStatus() (MasterStatus, error) { - return c.flavor.masterStatus(c) +func (c *Conn) ShowPrimaryStatus() (PrimaryStatus, error) { + return c.flavor.primaryStatus(c) } // WaitUntilPositionCommand returns the SQL command to issue diff --git a/go/mysql/flavor_filepos.go b/go/mysql/flavor_filepos.go index 33e67b76624..0e6b2f0ff4c 100644 --- a/go/mysql/flavor_filepos.go +++ b/go/mysql/flavor_filepos.go @@ -17,7 +17,6 @@ limitations under the License. package mysql import ( - "errors" "fmt" "io" "strconv" @@ -38,14 +37,14 @@ func newFilePosFlavor() flavor { return &filePosFlavor{} } -// masterGTIDSet is part of the Flavor interface. -func (flv *filePosFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { +// primaryGTIDSet is part of the Flavor interface. +func (flv *filePosFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { return nil, err } if len(qr.Rows) == 0 { - return nil, errors.New("no master status") + return nil, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) @@ -180,7 +179,7 @@ func (flv *filePosFlavor) setReplicationPositionCommands(pos Position) []string } // setReplicationPositionCommands is part of the Flavor interface. -func (flv *filePosFlavor) changeMasterArg() string { +func (flv *filePosFlavor) changeReplicationSourceArg() string { return "unsupported" } @@ -214,26 +213,26 @@ func parseFilePosReplicationStatus(resultMap map[string]string) (ReplicationStat } // masterStatus is part of the Flavor interface. -func (flv *filePosFlavor) masterStatus(c *Conn) (MasterStatus, error) { +func (flv *filePosFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return MasterStatus{}, ErrNoMasterStatus + return PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } return parseFilePosMasterStatus(resultMap) } -func parseFilePosMasterStatus(resultMap map[string]string) (MasterStatus, error) { - status := parseMasterStatus(resultMap) +func parseFilePosMasterStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := parsePrimaryStatus(resultMap) status.Position = status.FilePosition diff --git a/go/mysql/flavor_filepos_test.go b/go/mysql/flavor_filepos_test.go index 0570c137b40..9af21dc7a9e 100644 --- a/go/mysql/flavor_filepos_test.go +++ b/go/mysql/flavor_filepos_test.go @@ -64,7 +64,7 @@ func TestFilePosShouldGetMasterPosition(t *testing.T) { "File": "source-bin.000003", } - want := MasterStatus{ + want := PrimaryStatus{ Position: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, } diff --git a/go/mysql/flavor_mariadb.go b/go/mysql/flavor_mariadb.go index 422344d4f5a..cf53e6e6521 100644 --- a/go/mysql/flavor_mariadb.go +++ b/go/mysql/flavor_mariadb.go @@ -40,8 +40,8 @@ type mariadbFlavor102 struct { var _ flavor = (*mariadbFlavor101)(nil) var _ flavor = (*mariadbFlavor102)(nil) -// masterGTIDSet is part of the Flavor interface. -func (mariadbFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { +// primaryGTIDSet is part of the Flavor interface. +func (mariadbFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { qr, err := c.ExecuteFetch("SELECT @@GLOBAL.gtid_binlog_pos", 1, false) if err != nil { return nil, err @@ -108,7 +108,7 @@ func (mariadbFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Po func (mariadbFlavor) resetReplicationCommands(c *Conn) []string { resetCommands := []string{ "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget master host:port. 
+ "RESET SLAVE ALL", // "ALL" makes it forget source host:port. "RESET MASTER", "SET GLOBAL gtid_slave_pos = ''", } @@ -130,8 +130,8 @@ func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string { // Set gtid_slave_pos to tell the replica where to start // replicating. fmt.Sprintf("SET GLOBAL gtid_slave_pos = '%s'", pos), - // Set gtid_binlog_state so that if this server later becomes a - // master, it will know that it has seen everything up to and + // Set gtid_binlog_state so that if this server later becomes the + // primary, it will know that it has seen everything up to and // including 'pos'. Otherwise, if another replica asks this // server to replicate starting at exactly 'pos', this server // will throw an error when in gtid_strict_mode, since it @@ -142,7 +142,7 @@ func (mariadbFlavor) setReplicationPositionCommands(pos Position) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mariadbFlavor) changeMasterArg() string { +func (mariadbFlavor) changeReplicationSourceArg() string { return "MASTER_USE_GTID = current_pos" } @@ -178,24 +178,24 @@ func parseMariadbReplicationStatus(resultMap map[string]string) (ReplicationStat return status, nil } -// masterStatus is part of the Flavor interface. -func (m mariadbFlavor) masterStatus(c *Conn) (MasterStatus, error) { +// primaryStatus is part of the Flavor interface. +func (m mariadbFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return MasterStatus{}, ErrNoMasterStatus + return PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } - status := parseMasterStatus(resultMap) - status.Position.GTIDSet, err = m.masterGTIDSet(c) + status := parsePrimaryStatus(resultMap) + status.Position.GTIDSet, err = m.primaryGTIDSet(c) return status, err } diff --git a/go/mysql/flavor_mariadb_test.go b/go/mysql/flavor_mariadb_test.go index 82a5b1312b4..49b7aac1d74 100644 --- a/go/mysql/flavor_mariadb_test.go +++ b/go/mysql/flavor_mariadb_test.go @@ -41,7 +41,7 @@ func TestMariadbSetMasterCommands(t *testing.T) { MASTER_USE_GTID = current_pos` conn := &Conn{flavor: mariadbFlavor101{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mariadbFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } @@ -74,7 +74,7 @@ func TestMariadbSetMasterCommandsSSL(t *testing.T) { MASTER_USE_GTID = current_pos` conn := &Conn{flavor: mariadbFlavor101{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mariadbFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } diff --git a/go/mysql/flavor_mysql.go b/go/mysql/flavor_mysql.go index 9d4dd5e08da..8f2f8e61d7f 100644 --- a/go/mysql/flavor_mysql.go +++ b/go/mysql/flavor_mysql.go @@ -43,8 +43,8 @@ var _ flavor = (*mysqlFlavor56)(nil) var _ flavor = (*mysqlFlavor57)(nil) var _ flavor = (*mysqlFlavor80)(nil) -// masterGTIDSet is part of the Flavor interface. 
-func (mysqlFlavor) masterGTIDSet(c *Conn) (GTIDSet, error) { +// primaryGTIDSet is part of the Flavor interface. +func (mysqlFlavor) primaryGTIDSet(c *Conn) (GTIDSet, error) { // keep @@global as lowercase, as some servers like the Ripple binlog server only honors a lowercase `global` value qr, err := c.ExecuteFetch("SELECT @@global.gtid_executed", 1, false) if err != nil { @@ -96,7 +96,7 @@ func (mysqlFlavor) sendBinlogDumpCommand(c *Conn, serverID uint32, startPos Posi func (mysqlFlavor) resetReplicationCommands(c *Conn) []string { resetCommands := []string{ "STOP SLAVE", - "RESET SLAVE ALL", // "ALL" makes it forget master host:port. + "RESET SLAVE ALL", // "ALL" makes it forget source host:port. "RESET MASTER", // This will also clear gtid_executed and gtid_purged. } if c.SemiSyncExtensionLoaded() { @@ -114,7 +114,7 @@ func (mysqlFlavor) setReplicationPositionCommands(pos Position) []string { } // setReplicationPositionCommands is part of the Flavor interface. -func (mysqlFlavor) changeMasterArg() string { +func (mysqlFlavor) changeReplicationSourceArg() string { return "MASTER_AUTO_POSITION = 1" } @@ -166,32 +166,32 @@ func parseMysqlReplicationStatus(resultMap map[string]string) (ReplicationStatus return status, nil } -// masterStatus is part of the Flavor interface. -func (mysqlFlavor) masterStatus(c *Conn) (MasterStatus, error) { +// primaryStatus is part of the Flavor interface. +func (mysqlFlavor) primaryStatus(c *Conn) (PrimaryStatus, error) { qr, err := c.ExecuteFetch("SHOW MASTER STATUS", 100, true /* wantfields */) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } if len(qr.Rows) == 0 { // The query returned no data. We don't know how this could happen. 
- return MasterStatus{}, ErrNoMasterStatus + return PrimaryStatus{}, ErrNoPrimaryStatus } resultMap, err := resultToMap(qr) if err != nil { - return MasterStatus{}, err + return PrimaryStatus{}, err } - return parseMysqlMasterStatus(resultMap) + return parseMysqlPrimaryStatus(resultMap) } -func parseMysqlMasterStatus(resultMap map[string]string) (MasterStatus, error) { - status := parseMasterStatus(resultMap) +func parseMysqlPrimaryStatus(resultMap map[string]string) (PrimaryStatus, error) { + status := parsePrimaryStatus(resultMap) var err error status.Position.GTIDSet, err = parseMysql56GTIDSet(resultMap["Executed_Gtid_Set"]) if err != nil { - return MasterStatus{}, vterrors.Wrapf(err, "MasterStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) + return PrimaryStatus{}, vterrors.Wrapf(err, "PrimaryStatus can't parse MySQL 5.6 GTID (Executed_Gtid_Set: %#v)", resultMap["Executed_Gtid_Set"]) } return status, nil diff --git a/go/mysql/flavor_mysql_test.go b/go/mysql/flavor_mysql_test.go index 8f72242a891..50153011bcf 100644 --- a/go/mysql/flavor_mysql_test.go +++ b/go/mysql/flavor_mysql_test.go @@ -40,9 +40,9 @@ func TestMysql56SetMasterCommands(t *testing.T) { MASTER_AUTO_POSITION = 1` conn := &Conn{flavor: mysqlFlavor57{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { - t.Errorf("mysqlFlavor.SetMasterCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) + t.Errorf("mysqlFlavor.SetReplicationSourceCommand(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } } @@ -73,7 +73,7 @@ func TestMysql56SetMasterCommandsSSL(t *testing.T) { MASTER_AUTO_POSITION = 1` conn := &Conn{flavor: mysqlFlavor57{}} - got := conn.SetMasterCommand(params, masterHost, masterPort, masterConnectRetry) + 
got := conn.SetReplicationSourceCommand(params, masterHost, masterPort, masterConnectRetry) if got != want { t.Errorf("mysqlFlavor.SetMasterCommands(%#v, %#v, %#v, %#v) = %#v, want %#v", params, masterHost, masterPort, masterConnectRetry, got, want) } @@ -136,11 +136,11 @@ func TestMysqlShouldGetMasterPosition(t *testing.T) { } sid, _ := ParseSID("3e11fa47-71ca-11e1-9e33-c80aa9429562") - want := MasterStatus{ + want := PrimaryStatus{ Position: Position{GTIDSet: Mysql56GTIDSet{sid: []interval{{start: 1, end: 5}}}}, FilePosition: Position{GTIDSet: filePosGTID{file: "source-bin.000003", pos: 1307}}, } - got, err := parseMysqlMasterStatus(resultMap) + got, err := parseMysqlPrimaryStatus(resultMap) require.NoError(t, err) assert.Equalf(t, got.Position.GTIDSet.String(), want.Position.GTIDSet.String(), "got Position: %v; want Position: %v", got.Position.GTIDSet, want.Position.GTIDSet) assert.Equalf(t, got.FilePosition.GTIDSet.String(), want.FilePosition.GTIDSet.String(), "got FilePosition: %v; want FilePosition: %v", got.FilePosition.GTIDSet, want.FilePosition.GTIDSet) diff --git a/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-4604217852559360 b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-4604217852559360 new file mode 100644 index 00000000000..1a7853faf71 Binary files /dev/null and b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-4604217852559360 differ diff --git a/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-4955625785262080 b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-4955625785262080 new file mode 100644 index 00000000000..6c6583c8da7 Binary files /dev/null and b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-4955625785262080 differ diff --git a/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-5298715423277056 
b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-5298715423277056 new file mode 100644 index 00000000000..823929fcc96 Binary files /dev/null and b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-5298715423277056 differ diff --git a/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-6488670103273472 b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-6488670103273472 new file mode 100644 index 00000000000..d09e54216bc Binary files /dev/null and b/go/mysql/fuzzdata/clusterfuzz-testcase-minimized-handle_next_command_fuzzer-6488670103273472 differ diff --git a/go/mysql/handshake_test.go b/go/mysql/handshake_test.go index 9015bcd0951..40ec8215a35 100644 --- a/go/mysql/handshake_test.go +++ b/go/mysql/handshake_test.go @@ -21,10 +21,11 @@ import ( "net" "os" "path" - "reflect" "strings" "testing" + "vitess.io/vitess/go/test/utils" + "context" "vitess.io/vitess/go/vt/tlstest" @@ -83,9 +84,7 @@ func TestClearTextClientAuth(t *testing.T) { if err != nil { t.Fatalf("ExecuteFetch failed: %v", err) } - if !reflect.DeepEqual(result, selectRowsResult) { - t.Errorf("Got wrong result from ExecuteFetch(select rows): %v", result) - } + utils.MustMatch(t, result, selectRowsResult) // Send a ComQuit to avoid the error message on the server side. conn.writeComQuit() @@ -202,9 +201,7 @@ func testSSLConnectionBasics(t *testing.T, params *ConnParams) { if err != nil { t.Fatalf("ExecuteFetch failed: %v", err) } - if !reflect.DeepEqual(result, selectRowsResult) { - t.Errorf("Got wrong result from ExecuteFetch(select rows): %v", result) - } + utils.MustMatch(t, result, selectRowsResult) // Make sure this went through SSL. 
result, err = conn.ExecuteFetch("ssl echo", 10000, true) diff --git a/go/mysql/mysql_fuzzer.go b/go/mysql/mysql_fuzzer.go index 569475c8b8e..46f5ce3c0fa 100644 --- a/go/mysql/mysql_fuzzer.go +++ b/go/mysql/mysql_fuzzer.go @@ -31,7 +31,6 @@ import ( "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" - "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/vt/tlstest" "vitess.io/vitess/go/vt/vttls" ) @@ -84,11 +83,9 @@ func createFuzzingSocketPair() (net.Listener, *Conn, *Conn) { type fuzztestRun struct{} func (t fuzztestRun) NewConnection(c *Conn) { - panic("implement me") } func (t fuzztestRun) ConnectionClosed(c *Conn) { - panic("implement me") } func (t fuzztestRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Result) error) error { @@ -96,11 +93,11 @@ func (t fuzztestRun) ComQuery(c *Conn, query string, callback func(*sqltypes.Res } func (t fuzztestRun) ComPrepare(c *Conn, query string, bindVars map[string]*querypb.BindVariable) ([]*querypb.Field, error) { - panic("implement me") + return nil, nil } func (t fuzztestRun) ComStmtExecute(c *Conn, prepare *PrepareData, callback func(*sqltypes.Result) error) error { - panic("implement me") + return nil } func (t fuzztestRun) WarningCount(c *Conn) uint16 { @@ -108,7 +105,6 @@ func (t fuzztestRun) WarningCount(c *Conn) uint16 { } func (t fuzztestRun) ComResetConnection(c *Conn) { - panic("implement me") } var _ Handler = (*fuzztestRun)(nil) @@ -120,8 +116,8 @@ type fuzztestConn struct { } func (t fuzztestConn) Read(b []byte) (n int, err error) { - for j, i := range t.queryPacket { - b[j] = i + for i := 0; i < len(b) && i < len(t.queryPacket); i++ { + b[i] = t.queryPacket[i] } return len(b), nil } @@ -206,6 +202,7 @@ func FuzzHandleNextCommand(data []byte) int { pos: -1, queryPacket: data, }) + sConn.PrepareData = map[uint32]*PrepareData{} handler := &fuzztestRun{} _ = sConn.handleNextCommand(handler) @@ -302,23 +299,29 @@ func (th *fuzzTestHandler) WarningCount(c *Conn) uint16 { 
return th.warnings } +func (c *Conn) writeFuzzedPacket(packet []byte) { + c.sequence = 0 + data, pos := c.startEphemeralPacketWithHeader(len(packet) + 1) + copy(data[pos:], packet) + _ = c.writeEphemeralPacket() +} + func FuzzTLSServer(data []byte) int { + if len(data) < 40 { + return -1 + } // totalQueries is the number of queries the fuzzer // makes in each fuzz iteration totalQueries := 20 - var queries []string + var queries [][]byte c := gofuzzheaders.NewConsumer(data) for i := 0; i < totalQueries; i++ { - query, err := c.GetString() + query, err := c.GetBytes() if err != nil { return -1 } - - // We parse each query now to exit if the queries - // are invalid - _, err = sqlparser.Parse(query) - if err != nil { - return -1 + if len(query) < 40 { + continue } queries = append(queries, query) } @@ -379,7 +382,7 @@ func FuzzTLSServer(data []byte) int { } for i := 0; i < len(queries); i++ { - _, _ = conn.ExecuteFetch(queries[i], 1000, true) + conn.writeFuzzedPacket(queries[i]) } return 1 } diff --git a/go/mysql/mysql_fuzzer_test.go b/go/mysql/mysql_fuzzer_test.go new file mode 100644 index 00000000000..fa5e1ebc41d --- /dev/null +++ b/go/mysql/mysql_fuzzer_test.go @@ -0,0 +1,46 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package mysql + +import ( + "io/ioutil" + "path" + "runtime/debug" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestFuzzHandleNextCommandFromFile(t *testing.T) { + directoryName := "fuzzdata" + files, err := ioutil.ReadDir(directoryName) + require.NoError(t, err) + for _, file := range files { + t.Run(file.Name(), func(t *testing.T) { + defer func() { + r := recover() + if r != nil { + t.Error(r) + t.Fatal(string(debug.Stack())) + } + }() + testcase, err := ioutil.ReadFile(path.Join(directoryName, file.Name())) + require.NoError(t, err) + FuzzHandleNextCommand(testcase) + }) + } +} diff --git a/go/mysql/master_status.go b/go/mysql/primary_status.go similarity index 70% rename from go/mysql/master_status.go rename to go/mysql/primary_status.go index 0f5cfff679e..19ef63be60d 100644 --- a/go/mysql/master_status.go +++ b/go/mysql/primary_status.go @@ -20,16 +20,16 @@ import ( replicationdatapb "vitess.io/vitess/go/vt/proto/replicationdata" ) -// MasterStatus holds replication information from SHOW MASTER STATUS. -type MasterStatus struct { - // Position represents the master's GTID based position. +// PrimaryStatus holds replication information from SHOW MASTER STATUS. +type PrimaryStatus struct { + // Position represents the server's GTID based position. Position Position - // FilePosition represents the master's file based position. + // FilePosition represents the server's file based position. FilePosition Position } -// MasterStatusToProto translates a MasterStatus to proto3. -func MasterStatusToProto(s MasterStatus) *replicationdatapb.MasterStatus { +// PrimaryStatusToProto translates a PrimaryStatus to proto3. 
+func PrimaryStatusToProto(s PrimaryStatus) *replicationdatapb.MasterStatus { return &replicationdatapb.MasterStatus{ Position: EncodePosition(s.Position), FilePosition: EncodePosition(s.FilePosition), diff --git a/go/mysql/query.go b/go/mysql/query.go index 61858b73f7d..3bea7b5f516 100644 --- a/go/mysql/query.go +++ b/go/mysql/query.go @@ -253,12 +253,15 @@ func (c *Conn) readColumnDefinitionType(field *querypb.Field, index int) error { // parseRow parses an individual row. // Returns a SQLError. -func (c *Conn) parseRow(data []byte, fields []*querypb.Field, reader func([]byte, int) ([]byte, int, bool)) ([]sqltypes.Value, error) { +func (c *Conn) parseRow(data []byte, fields []*querypb.Field, reader func([]byte, int) ([]byte, int, bool), result []sqltypes.Value) ([]sqltypes.Value, error) { colNumber := len(fields) - result := make([]sqltypes.Value, colNumber) + if result == nil { + result = make([]sqltypes.Value, 0, colNumber) + } pos := 0 for i := 0; i < colNumber; i++ { if data[pos] == NullValue { + result = append(result, sqltypes.Value{}) pos++ continue } @@ -268,7 +271,7 @@ func (c *Conn) parseRow(data []byte, fields []*querypb.Field, reader func([]byte if !ok { return nil, NewSQLError(CRMalformedPacket, SSUnknownSQLState, "decoding string failed") } - result[i] = sqltypes.MakeTrusted(fields[i].Type, s) + result = append(result, sqltypes.MakeTrusted(fields[i].Type, s)) } return result, nil } @@ -464,7 +467,7 @@ func (c *Conn) ReadQueryResult(maxrows int, wantfields bool) (*sqltypes.Result, } // Regular row. 
- row, err := c.parseRow(data, result.Fields, readLenEncStringAsBytesCopy) + row, err := c.parseRow(data, result.Fields, readLenEncStringAsBytesCopy, nil) if err != nil { c.recycleReadPacket() return nil, false, 0, err diff --git a/go/mysql/query_test.go b/go/mysql/query_test.go index 8b5fae975e1..0a3689b719c 100644 --- a/go/mysql/query_test.go +++ b/go/mysql/query_test.go @@ -22,11 +22,11 @@ import ( "sync" "testing" + "google.golang.org/protobuf/proto" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/golang/protobuf/proto" - "vitess.io/vitess/go/sqltypes" querypb "vitess.io/vitess/go/vt/proto/query" @@ -702,7 +702,7 @@ func checkQueryInternal(t *testing.T, query string, sConn, cConn *Conn, result * got.Fields = nil } for { - row, err := cConn.FetchNext() + row, err := cConn.FetchNext(nil) if err != nil { fatalError = fmt.Sprintf("FetchNext(%v) failed: %v", query, err) return diff --git a/go/mysql/schema.go b/go/mysql/schema.go index 01841429732..59f98f0dbf1 100644 --- a/go/mysql/schema.go +++ b/go/mysql/schema.go @@ -34,8 +34,87 @@ const ( BaseShowPrimary = "SELECT table_name, column_name FROM information_schema.key_column_usage WHERE table_schema=database() AND constraint_name='PRIMARY' ORDER BY table_name, ordinal_position" // ShowRowsRead is the query used to find the number of rows read. ShowRowsRead = "show status like 'Innodb_rows_read'" + + // CreateVTDatabase creates the _vt database + CreateVTDatabase = `CREATE DATABASE IF NOT EXISTS _vt` + + // CreateSchemaCopyTable query creates schemacopy table in _vt schema. 
+ CreateSchemaCopyTable = ` +CREATE TABLE if not exists _vt.schemacopy ( + table_schema varchar(64) NOT NULL, + table_name varchar(64) NOT NULL, + column_name varchar(64) NOT NULL, + ordinal_position bigint(21) unsigned NOT NULL, + character_set_name varchar(32) DEFAULT NULL, + collation_name varchar(32) DEFAULT NULL, + data_type varchar(64) NOT NULL, + column_key varchar(3) NOT NULL, + PRIMARY KEY (table_schema, table_name, ordinal_position))` + + detectNewColumns = ` +select ISC.table_name +from information_schema.columns as ISC + left join _vt.schemacopy as c on + ISC.table_name = c.table_name and + ISC.table_schema=c.table_schema and + ISC.ordinal_position = c.ordinal_position +where ISC.table_schema = database() AND c.table_schema is null` + + detectChangeColumns = ` +select ISC.table_name +from information_schema.columns as ISC + join _vt.schemacopy as c on + ISC.table_name = c.table_name and + ISC.table_schema=c.table_schema and + ISC.ordinal_position = c.ordinal_position +where ISC.table_schema = database() + AND (not(c.column_name <=> ISC.column_name) + OR not(ISC.character_set_name <=> c.character_set_name) + OR not(ISC.collation_name <=> c.collation_name) + OR not(ISC.data_type <=> c.data_type) + OR not(ISC.column_key <=> c.column_key))` + + detectRemoveColumns = ` +select c.table_name +from information_schema.columns as ISC + right join _vt.schemacopy as c on + ISC.table_name = c.table_name and + ISC.table_schema=c.table_schema and + ISC.ordinal_position = c.ordinal_position +where c.table_schema = database() AND ISC.table_schema is null` + + // DetectSchemaChange query detects if there is any schema change from previous copy. + DetectSchemaChange = detectChangeColumns + " UNION " + detectNewColumns + " UNION " + detectRemoveColumns + + // ClearSchemaCopy query clears the schemacopy table. + ClearSchemaCopy = `delete from _vt.schemacopy` + + // InsertIntoSchemaCopy query copies over the schema information from information_schema.columns table. 
+ InsertIntoSchemaCopy = `insert _vt.schemacopy +select table_schema, table_name, column_name, ordinal_position, character_set_name, collation_name, data_type, column_key +from information_schema.columns +where table_schema = database()` + + // FetchUpdatedTables queries fetches all information about updated tables + FetchUpdatedTables = `select table_name, column_name, data_type +from _vt.schemacopy +where table_schema = database() and + table_name in ::tableNames +order by table_name, ordinal_position` + + // FetchTables queries fetches all information about tables + FetchTables = `select table_name, column_name, data_type +from _vt.schemacopy +where table_schema = database() +order by table_name, ordinal_position` ) +// VTDatabaseInit contains all the schema creation queries needed to +var VTDatabaseInit = []string{ + CreateVTDatabase, + CreateSchemaCopyTable, +} + // BaseShowTablesFields contains the fields returned by a BaseShowTables or a BaseShowTablesForTable command. // They are validated by the // testBaseShowTables test. 
diff --git a/go/mysql/server.go b/go/mysql/server.go index 0647e06644f..e6de1925974 100644 --- a/go/mysql/server.go +++ b/go/mysql/server.go @@ -183,7 +183,7 @@ type Listener struct { PreHandleFunc func(context.Context, net.Conn, uint32) (net.Conn, error) } -// NewFromListener creares a new mysql listener from an existing net.Listener +// NewFromListener creates a new mysql listener from an existing net.Listener func NewFromListener(l net.Listener, authServer AuthServer, handler Handler, connReadTimeout time.Duration, connWriteTimeout time.Duration) (*Listener, error) { cfg := ListenerConfig{ Listener: l, diff --git a/go/mysql/server_test.go b/go/mysql/server_test.go index a1ca1271255..e6e9eb69cfb 100644 --- a/go/mysql/server_test.go +++ b/go/mysql/server_test.go @@ -565,7 +565,7 @@ func TestServer(t *testing.T) { // If there's an error after streaming has started, // we should get a 2013 - th.SetErr(NewSQLError(ERUnknownComError, SSUnknownComError, "forced error after send")) + th.SetErr(NewSQLError(ERUnknownComError, SSNetError, "forced error after send")) output, err = runMysqlWithErr(t, params, "error after send") require.Error(t, err) assert.Contains(t, output, "ERROR 2013 (HY000)", "Unexpected output for 'panic'") @@ -650,7 +650,7 @@ func TestServerStats(t *testing.T) { connRefuse.Reset() // Run an 'error' command. - th.SetErr(NewSQLError(ERUnknownComError, SSUnknownComError, "forced query error")) + th.SetErr(NewSQLError(ERUnknownComError, SSNetError, "forced query error")) output, ok := runMysql(t, params, "error") require.False(t, ok, "mysql should have failed: %v", output) @@ -843,7 +843,17 @@ func TestTLSServer(t *testing.T) { "") require.NoError(t, err) l.TLSConfig.Store(serverConfig) - go l.Accept() + + var wg sync.WaitGroup + wg.Add(1) + go func(l *Listener) { + wg.Done() + l.Accept() + }(l) + // This is ensure the listener is called + wg.Wait() + // Sleep so that the Accept function is called as well.' 
+ time.Sleep(3 * time.Second) connCountByTLSVer.ResetAll() // Setup the right parameters. @@ -933,7 +943,17 @@ func TestTLSRequired(t *testing.T) { require.NoError(t, err) l.TLSConfig.Store(serverConfig) l.RequireSecureTransport = true - go l.Accept() + + var wg sync.WaitGroup + wg.Add(1) + go func(l *Listener) { + wg.Done() + l.Accept() + }(l) + // This is ensure the listener is called + wg.Wait() + // Sleep so that the Accept function is called as well.' + time.Sleep(3 * time.Second) // Setup conn params without SSL. params := &ConnParams{ @@ -1254,7 +1274,7 @@ func TestServerFlush(t *testing.T) { flds, err := c.Fields() require.NoError(t, err) if duration, want := time.Since(start), 20*time.Millisecond; duration < *mysqlServerFlushDelay || duration > want { - assert.Fail(t, "duration: %v, want between %v and %v", duration, *mysqlServerFlushDelay, want) + assert.Fail(t, "duration: %v, want between %v and %v", duration.String(), (*mysqlServerFlushDelay).String(), want.String()) } want1 := []*querypb.Field{{ Name: "result", @@ -1262,7 +1282,7 @@ func TestServerFlush(t *testing.T) { }} assert.Equal(t, want1, flds) - row, err := c.FetchNext() + row, err := c.FetchNext(nil) require.NoError(t, err) if duration, want := time.Since(start), 50*time.Millisecond; duration < want { assert.Fail(t, "duration: %v, want > %v", duration, want) @@ -1270,7 +1290,7 @@ func TestServerFlush(t *testing.T) { want2 := []sqltypes.Value{sqltypes.MakeTrusted(querypb.Type_VARCHAR, []byte("delayed"))} assert.Equal(t, want2, row) - row, err = c.FetchNext() + row, err = c.FetchNext(nil) require.NoError(t, err) assert.Nil(t, row) } diff --git a/go/mysql/sql_error.go b/go/mysql/sql_error.go index fcf02abaf13..43ad76f6fed 100644 --- a/go/mysql/sql_error.go +++ b/go/mysql/sql_error.go @@ -92,46 +92,20 @@ func NewSQLErrorFromError(err error) error { } sErr := convertToMysqlError(err) - if _, ok := sErr.(*SQLError); ok { - return sErr + if serr, ok := sErr.(*SQLError); ok { + return serr } msg := 
err.Error() match := errExtract.FindStringSubmatch(msg) - if len(match) < 2 { - // Map vitess error codes into the mysql equivalent - code := vterrors.Code(err) - num := ERUnknownError - ss := SSUnknownSQLState - switch code { - case vtrpcpb.Code_CANCELED, vtrpcpb.Code_DEADLINE_EXCEEDED, vtrpcpb.Code_ABORTED: - num = ERQueryInterrupted - ss = SSQueryInterrupted - case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INVALID_ARGUMENT, vtrpcpb.Code_NOT_FOUND, vtrpcpb.Code_ALREADY_EXISTS, - vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_OUT_OF_RANGE, vtrpcpb.Code_UNAVAILABLE, vtrpcpb.Code_DATA_LOSS: - num = ERUnknownError - case vtrpcpb.Code_PERMISSION_DENIED, vtrpcpb.Code_UNAUTHENTICATED: - num = ERAccessDeniedError - ss = SSAccessDeniedError - case vtrpcpb.Code_RESOURCE_EXHAUSTED: - num = demuxResourceExhaustedErrors(err.Error()) - ss = SSClientError - case vtrpcpb.Code_UNIMPLEMENTED: - num = ERNotSupportedYet - ss = SSClientError - case vtrpcpb.Code_INTERNAL: - num = ERInternalError - ss = SSUnknownSQLState - } - - // Not found, build a generic SQLError. 
- return &SQLError{ - Num: num, - State: ss, - Message: msg, - } + if len(match) >= 2 { + return extractSQLErrorFromMessage(match, msg) } + return mapToSQLErrorFromErrorCode(err, msg) +} + +func extractSQLErrorFromMessage(match []string, msg string) *SQLError { num, err := strconv.Atoi(match[1]) if err != nil { return &SQLError{ @@ -141,12 +115,44 @@ func NewSQLErrorFromError(err error) error { } } - serr := &SQLError{ + return &SQLError{ Num: num, State: match[2], Message: msg, } - return serr +} + +func mapToSQLErrorFromErrorCode(err error, msg string) *SQLError { + // Map vitess error codes into the mysql equivalent + num := ERUnknownError + ss := SSUnknownSQLState + switch vterrors.Code(err) { + case vtrpcpb.Code_CANCELED, vtrpcpb.Code_DEADLINE_EXCEEDED, vtrpcpb.Code_ABORTED: + num = ERQueryInterrupted + ss = SSQueryInterrupted + case vtrpcpb.Code_UNKNOWN, vtrpcpb.Code_INVALID_ARGUMENT, vtrpcpb.Code_NOT_FOUND, vtrpcpb.Code_ALREADY_EXISTS, + vtrpcpb.Code_FAILED_PRECONDITION, vtrpcpb.Code_OUT_OF_RANGE, vtrpcpb.Code_UNAVAILABLE, vtrpcpb.Code_DATA_LOSS: + num = ERUnknownError + case vtrpcpb.Code_PERMISSION_DENIED, vtrpcpb.Code_UNAUTHENTICATED: + num = ERAccessDeniedError + ss = SSAccessDeniedError + case vtrpcpb.Code_RESOURCE_EXHAUSTED: + num = demuxResourceExhaustedErrors(err.Error()) + ss = SSClientError + case vtrpcpb.Code_UNIMPLEMENTED: + num = ERNotSupportedYet + ss = SSClientError + case vtrpcpb.Code_INTERNAL: + num = ERInternalError + ss = SSUnknownSQLState + } + + // Not found, build a generic SQLError. 
+ return &SQLError{ + Num: num, + State: ss, + Message: msg, + } } var stateToMysqlCode = map[vterrors.State]struct { @@ -157,6 +163,7 @@ var stateToMysqlCode = map[vterrors.State]struct { vterrors.AccessDeniedError: {num: ERAccessDeniedError, state: SSAccessDeniedError}, vterrors.BadDb: {num: ERBadDb, state: SSClientError}, vterrors.BadFieldError: {num: ERBadFieldError, state: SSBadFieldError}, + vterrors.BadTableError: {num: ERBadTable, state: SSUnknownTable}, vterrors.CantUseOptionHere: {num: ERCantUseOptionHere, state: SSClientError}, vterrors.DataOutOfRange: {num: ERDataOutOfRange, state: SSDataOutOfRange}, vterrors.DbCreateExists: {num: ERDbCreateExists, state: SSUnknownSQLState}, @@ -170,7 +177,9 @@ var stateToMysqlCode = map[vterrors.State]struct { vterrors.NotSupportedYet: {num: ERNotSupportedYet, state: SSClientError}, vterrors.ForbidSchemaChange: {num: ERForbidSchemaChange, state: SSUnknownSQLState}, vterrors.NetPacketTooLarge: {num: ERNetPacketTooLarge, state: SSNetError}, + vterrors.NonUniqError: {num: ERNonUniq, state: SSConstraintViolation}, vterrors.NonUniqTable: {num: ERNonUniqTable, state: SSClientError}, + vterrors.NonUpdateableTable: {num: ERNonUpdateableTable, state: SSUnknownSQLState}, vterrors.QueryInterrupted: {num: ERQueryInterrupted, state: SSQueryInterrupted}, vterrors.SPDoesNotExist: {num: ERSPDoesNotExist, state: SSClientError}, vterrors.SyntaxError: {num: ERSyntaxError, state: SSClientError}, @@ -181,6 +190,10 @@ var stateToMysqlCode = map[vterrors.State]struct { vterrors.WrongNumberOfColumnsInSelect: {num: ERWrongNumberOfColumnsInSelect, state: SSWrongNumberOfColumns}, vterrors.WrongTypeForVar: {num: ERWrongTypeForVar, state: SSClientError}, vterrors.WrongValueForVar: {num: ERWrongValueForVar, state: SSClientError}, + vterrors.ServerNotAvailable: {num: ERServerIsntAvailable, state: SSNetError}, + vterrors.CantDoThisInTransaction: {num: ERCantDoThisDuringAnTransaction, state: SSCantDoThisDuringAnTransaction}, + 
vterrors.RequiresPrimaryKey: {num: ERRequiresPrimaryKey, state: SSClientError}, + vterrors.NoSuchSession: {num: ERUnknownComError, state: SSNetError}, } func init() { diff --git a/go/mysql/sql_error_test.go b/go/mysql/sql_error_test.go index 223d26ce2c4..7efb1669534 100644 --- a/go/mysql/sql_error_test.go +++ b/go/mysql/sql_error_test.go @@ -19,8 +19,6 @@ package mysql import ( "testing" - "github.com/stretchr/testify/require" - "vitess.io/vitess/go/vt/proto/vtrpc" "vitess.io/vitess/go/vt/vterrors" @@ -154,11 +152,9 @@ func TestNewSQLErrorFromError(t *testing.T) { for _, tc := range tCases { t.Run(tc.err.Error(), func(t *testing.T) { - err := NewSQLErrorFromError(tc.err) - sErr, ok := err.(*SQLError) - require.True(t, ok) - assert.Equal(t, tc.num, sErr.Number()) - assert.Equal(t, tc.ss, sErr.SQLState()) + err := NewSQLErrorFromError(tc.err).(*SQLError) + assert.Equal(t, tc.num, err.Number()) + assert.Equal(t, tc.ss, err.SQLState()) }) } } diff --git a/go/mysql/streaming_query.go b/go/mysql/streaming_query.go index 63ce78e55dd..1f62dd35a99 100644 --- a/go/mysql/streaming_query.go +++ b/go/mysql/streaming_query.go @@ -107,7 +107,7 @@ func (c *Conn) Fields() ([]*querypb.Field, error) { // FetchNext returns the next result for an ongoing streaming query. // It returns (nil, nil) if there is nothing more to read. -func (c *Conn) FetchNext() ([]sqltypes.Value, error) { +func (c *Conn) FetchNext(in []sqltypes.Value) ([]sqltypes.Value, error) { if c.fields == nil { // We are already done, and the result was closed. return nil, NewSQLError(CRCommandsOutOfSync, SSUnknownSQLState, "no streaming query in progress") @@ -133,14 +133,15 @@ func (c *Conn) FetchNext() ([]sqltypes.Value, error) { } // Regular row. - return c.parseRow(data, c.fields, readLenEncStringAsBytes) + return c.parseRow(data, c.fields, readLenEncStringAsBytes, in) } // CloseResult can be used to terminate a streaming query // early. It just drains the remaining values. 
func (c *Conn) CloseResult() { + row := make([]sqltypes.Value, 0, len(c.fields)) for c.fields != nil { - rows, err := c.FetchNext() + rows, err := c.FetchNext(row[:0]) if err != nil || rows == nil { // We either got an error, or got the last result. c.fields = nil diff --git a/go/protoutil/time.go b/go/protoutil/time.go new file mode 100644 index 00000000000..c226001d4b2 --- /dev/null +++ b/go/protoutil/time.go @@ -0,0 +1,44 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protoutil + +import ( + "time" + + "vitess.io/vitess/go/vt/proto/vttime" +) + +// TimeFromProto converts a vttime.Time proto message into a time.Time object. +func TimeFromProto(tpb *vttime.Time) time.Time { + if tpb == nil { + return time.Time{} + } + + return time.Unix(tpb.Seconds, int64(tpb.Nanoseconds)) +} + +// TimeToProto converts a time.Time object into a vttime.Time proto mesasge. +func TimeToProto(t time.Time) *vttime.Time { + secs, nanos := t.Unix(), t.UnixNano() + + nsecs := secs * 1e9 + extraNanos := nanos - nsecs + return &vttime.Time{ + Seconds: secs, + Nanoseconds: int32(extraNanos), + } +} diff --git a/go/protoutil/time_test.go b/go/protoutil/time_test.go new file mode 100644 index 00000000000..eb3ecb2f0a9 --- /dev/null +++ b/go/protoutil/time_test.go @@ -0,0 +1,52 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package protoutil + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/proto/vttime" +) + +func TestTimeFromProto(t *testing.T) { + now := time.Date(2021, time.June, 12, 13, 14, 15, 0 /* nanos */, time.UTC) + vtt := TimeToProto(now) + + utils.MustMatch(t, now, TimeFromProto(vtt)) + + vtt.Nanoseconds = 100 + utils.MustMatch(t, now.Add(100*time.Nanosecond), TimeFromProto(vtt)) + + vtt.Nanoseconds = 1e9 + utils.MustMatch(t, now.Add(time.Second), TimeFromProto(vtt)) + + assert.True(t, TimeFromProto(nil).IsZero(), "expected Go time from nil vttime to be Zero") +} + +func TestTimeToProto(t *testing.T) { + now := time.Date(2021, time.June, 12, 13, 14, 15, 0 /* nanos */, time.UTC) + secs := now.Unix() + utils.MustMatch(t, &vttime.Time{Seconds: secs}, TimeToProto(now)) + + // Testing secs/nanos conversions + utils.MustMatch(t, &vttime.Time{Seconds: secs, Nanoseconds: 100}, TimeToProto(now.Add(100*time.Nanosecond))) + utils.MustMatch(t, &vttime.Time{Seconds: secs + 1}, TimeToProto(now.Add(1e9*time.Nanosecond))) // this should rollover to a full second +} diff --git a/go/sqltypes/bind_variables.go b/go/sqltypes/bind_variables.go index 9d60c1c8e1a..9473f0efa73 100644 --- a/go/sqltypes/bind_variables.go +++ b/go/sqltypes/bind_variables.go @@ -22,7 +22,7 @@ import ( "fmt" "strconv" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" ) diff --git a/go/sqltypes/bind_variables_test.go b/go/sqltypes/bind_variables_test.go index 
b7e1275250c..fdf34755b6d 100644 --- a/go/sqltypes/bind_variables_test.go +++ b/go/sqltypes/bind_variables_test.go @@ -22,10 +22,8 @@ import ( "strings" "testing" - "vitess.io/vitess/go/test/utils" - - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -250,7 +248,7 @@ func TestBuildBindVariable(t *testing.T) { if tcase.err != "" { require.EqualError(t, err, tcase.err) } else { - utils.MustMatch(t, tcase.out, bv, "binvar output did not match") + require.Truef(t, proto.Equal(tcase.out, bv), "binvar output did not match") } }) } @@ -587,8 +585,7 @@ func TestBindVariablesFormat(t *testing.T) { if !strings.Contains(formattedStr, "key_3") || !strings.Contains(formattedStr, "val_3") { t.Fatalf("bind variable 'key_3': 'val_3' is not formatted") } - if !strings.Contains(formattedStr, "key_4") || - !strings.Contains(formattedStr, "values: values:") { + if !strings.Contains(formattedStr, "key_4:type:TUPLE") { t.Fatalf("bind variable 'key_4': (1, 2) is not formatted") } diff --git a/go/sqltypes/event_token_test.go b/go/sqltypes/event_token_test.go index 84465fa300d..80cfdc35404 100644 --- a/go/sqltypes/event_token_test.go +++ b/go/sqltypes/event_token_test.go @@ -19,7 +19,7 @@ package sqltypes import ( "testing" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" ) diff --git a/go/sqltypes/proto3.go b/go/sqltypes/proto3.go index 284e27fadbd..4b2c9869e78 100644 --- a/go/sqltypes/proto3.go +++ b/go/sqltypes/proto3.go @@ -17,7 +17,7 @@ limitations under the License. package sqltypes import ( - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/vterrors" @@ -30,7 +30,17 @@ import ( // RowToProto3 converts []Value to proto3. 
func RowToProto3(row []Value) *querypb.Row { result := &querypb.Row{} - result.Lengths = make([]int64, 0, len(row)) + _ = RowToProto3Inplace(row, result) + return result +} + +// RowToProto3Inplace converts []Value to proto3 and stores the conversion in the provided Row +func RowToProto3Inplace(row []Value, result *querypb.Row) int { + if result.Lengths == nil { + result.Lengths = make([]int64, 0, len(row)) + } else { + result.Lengths = result.Lengths[:0] + } total := 0 for _, c := range row { if c.IsNull() { @@ -41,14 +51,18 @@ func RowToProto3(row []Value) *querypb.Row { result.Lengths = append(result.Lengths, int64(length)) total += length } - result.Values = make([]byte, 0, total) + if result.Values == nil { + result.Values = make([]byte, 0, total) + } else { + result.Values = result.Values[:0] + } for _, c := range row { if c.IsNull() { continue } result.Values = append(result.Values, c.Raw()...) } - return result + return total } // RowsToProto3 converts [][]Value to proto3. diff --git a/go/sqltypes/proto3_test.go b/go/sqltypes/proto3_test.go index 6b5701eed62..efa7b10a74b 100644 --- a/go/sqltypes/proto3_test.go +++ b/go/sqltypes/proto3_test.go @@ -19,8 +19,8 @@ package sqltypes import ( "testing" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" diff --git a/go/sqltypes/result.go b/go/sqltypes/result.go index 7df1ef2aebf..4b555d0f691 100644 --- a/go/sqltypes/result.go +++ b/go/sqltypes/result.go @@ -19,7 +19,7 @@ package sqltypes import ( "reflect" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -91,13 +91,10 @@ func (result *Result) Copy() *Result { RowsAffected: result.RowsAffected, } if result.Fields != nil { - fieldsp := make([]*querypb.Field, len(result.Fields)) - fields := make([]querypb.Field, len(result.Fields)) + out.Fields = 
make([]*querypb.Field, len(result.Fields)) for i, f := range result.Fields { - fields[i] = *f - fieldsp[i] = &fields[i] + out.Fields[i] = proto.Clone(f).(*querypb.Field) } - out.Fields = fieldsp } if result.Rows != nil { out.Rows = make([][]Value, 0, len(result.Rows)) diff --git a/go/sqltypes/result_test.go b/go/sqltypes/result_test.go index 5f6a7e014fa..f14b6e15629 100644 --- a/go/sqltypes/result_test.go +++ b/go/sqltypes/result_test.go @@ -20,6 +20,8 @@ import ( "reflect" "testing" + "vitess.io/vitess/go/test/utils" + querypb "vitess.io/vitess/go/vt/proto/query" ) @@ -118,9 +120,7 @@ func TestCopy(t *testing.T) { }, } out := in.Copy() - if !reflect.DeepEqual(out, in) { - t.Errorf("Copy:\n%v, want\n%v", out, in) - } + utils.MustMatch(t, in, out) } func TestTruncate(t *testing.T) { @@ -346,9 +346,7 @@ func TestStripMetaData(t *testing.T) { } } // check we didn't change the original result. - if !reflect.DeepEqual(tcase.in, inCopy) { - t.Error("StripMetaData modified original result") - } + utils.MustMatch(t, tcase.in, inCopy) } } diff --git a/go/sqltypes/testing.go b/go/sqltypes/testing.go index 54cfdcedef8..3a82dafe3d2 100644 --- a/go/sqltypes/testing.go +++ b/go/sqltypes/testing.go @@ -145,9 +145,5 @@ func PrintResults(results []*Result) string { } func split(str string) []string { - splits := strings.Split(str, "|") - for i, v := range splits { - splits[i] = strings.TrimSpace(v) - } - return splits + return strings.Split(str, "|") } diff --git a/go/sqltypes/value.go b/go/sqltypes/value.go index 8b8bf84ad5c..491ee7d7059 100644 --- a/go/sqltypes/value.go +++ b/go/sqltypes/value.go @@ -284,6 +284,36 @@ func (v Value) EncodeSQL(b BinWriter) { } } +// EncodeSQLStringBuilder is identical to EncodeSQL but it takes a strings.Builder +// as its writer, so it can be inlined for performance. 
+func (v Value) EncodeSQLStringBuilder(b *strings.Builder) { + switch { + case v.typ == Null: + b.Write(nullstr) + case v.IsQuoted(): + encodeBytesSQLStringBuilder(v.val, b) + case v.typ == Bit: + encodeBytesSQLBits(v.val, b) + default: + b.Write(v.val) + } +} + +// EncodeSQLBytes2 is identical to EncodeSQL but it takes a bytes2.Buffer +// as its writer, so it can be inlined for performance. +func (v Value) EncodeSQLBytes2(b *bytes2.Buffer) { + switch { + case v.typ == Null: + b.Write(nullstr) + case v.IsQuoted(): + encodeBytesSQLBytes2(v.val, b) + case v.typ == Bit: + encodeBytesSQLBits(v.val, b) + default: + b.Write(v.val) + } +} + // EncodeASCII encodes the value using 7-bit clean ascii bytes. func (v Value) EncodeASCII(b BinWriter) { switch { @@ -387,6 +417,24 @@ func (v *Value) UnmarshalJSON(b []byte) error { func encodeBytesSQL(val []byte, b BinWriter) { buf := &bytes2.Buffer{} + encodeBytesSQLBytes2(val, buf) + b.Write(buf.Bytes()) +} + +func encodeBytesSQLBytes2(val []byte, buf *bytes2.Buffer) { + buf.WriteByte('\'') + for _, ch := range val { + if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape { + buf.WriteByte(ch) + } else { + buf.WriteByte('\\') + buf.WriteByte(encodedChar) + } + } + buf.WriteByte('\'') +} + +func encodeBytesSQLStringBuilder(val []byte, buf *strings.Builder) { buf.WriteByte('\'') for _, ch := range val { if encodedChar := SQLEncodeMap[ch]; encodedChar == DontEscape { @@ -397,7 +445,6 @@ func encodeBytesSQL(val []byte, b BinWriter) { } } buf.WriteByte('\'') - b.Write(buf.Bytes()) } // BufEncodeStringSQL encodes the string into a strings.Builder diff --git a/go/stats/export.go b/go/stats/export.go index f14f4248293..1892f16ef0c 100644 --- a/go/stats/export.go +++ b/go/stats/export.go @@ -46,6 +46,9 @@ var statsBackend = flag.String("stats_backend", "", "The name of the registered var combineDimensions = flag.String("stats_combine_dimensions", "", `List of dimensions to be combined into a single "all" value in exported stats 
vars`) var dropVariables = flag.String("stats_drop_variables", "", `Variables to be dropped from the list of exported variables.`) +// CommonTags is a comma-separated list of common tags for stats backends +var CommonTags = flag.String("stats_common_tags", "", `Comma-separated list of common tags for the stats backend. It provides both label and values. Example: label1:value1,label2:value2`) + // StatsAllStr is the consolidated name if a dimension gets combined. const StatsAllStr = "all" @@ -317,3 +320,18 @@ func isVarDropped(name string) bool { } return droppedVars[name] } + +// ParseCommonTags parses a comma-separated string into map of tags +// If you want to global service values like host, service name, git revision, etc, +// this is the place to do it. +func ParseCommonTags(s string) map[string]string { + inputs := strings.Split(s, ",") + tags := make(map[string]string) + for _, input := range inputs { + if strings.Contains(input, ":") { + tag := strings.Split(input, ":") + tags[strings.TrimSpace(tag[0])] = strings.TrimSpace(tag[1]) + } + } + return tags +} diff --git a/go/stats/export_test.go b/go/stats/export_test.go index a02d5f8fd34..483d5a04d6a 100644 --- a/go/stats/export_test.go +++ b/go/stats/export_test.go @@ -18,6 +18,7 @@ package stats import ( "expvar" + "reflect" "testing" ) @@ -139,3 +140,20 @@ func TestStringMapToString(t *testing.T) { t.Errorf("expected %v or %v, got %v", expected1, expected2, got) } } + +func TestParseCommonTags(t *testing.T) { + res := ParseCommonTags("") + if len(res) != 0 { + t.Errorf("expected empty result, got %v", res) + } + res = ParseCommonTags("s,a:b ") + expected1 := map[string]string{"a": "b"} + if !reflect.DeepEqual(expected1, res) { + t.Errorf("expected %v, got %v", expected1, res) + } + res = ParseCommonTags("a:b, c:d") + expected2 := map[string]string{"a": "b", "c": "d"} + if !reflect.DeepEqual(expected2, res) { + t.Errorf("expected %v, got %v", expected2, res) + } +} diff --git a/go/stats/opentsdb/opentsdb.go 
b/go/stats/opentsdb/opentsdb.go index 8e67ee9bc8a..d0f63650396 100644 --- a/go/stats/opentsdb/opentsdb.go +++ b/go/stats/opentsdb/opentsdb.go @@ -87,30 +87,33 @@ type dataCollector struct { func Init(prefix string) { // Needs to happen in servenv.OnRun() instead of init because it requires flag parsing and logging servenv.OnRun(func() { - if *openTsdbURI == "" { - return - } + InitWithoutServenv(prefix) + }) +} - backend := &openTSDBBackend{ - prefix: prefix, - // If you want to global service values like host, service name, git revision, etc, - // this is the place to do it. - commonTags: map[string]string{}, - } +// InitWithoutServenv initializes the opentsdb without servenv +func InitWithoutServenv(prefix string) { + if *openTsdbURI == "" { + return + } - stats.RegisterPushBackend("opentsdb", backend) + backend := &openTSDBBackend{ + prefix: prefix, + commonTags: stats.ParseCommonTags(*stats.CommonTags), + } - http.HandleFunc("/debug/opentsdb", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - dataPoints := (*backend).getDataPoints() - sort.Sort(byMetric(dataPoints)) + stats.RegisterPushBackend("opentsdb", backend) - if b, err := json.MarshalIndent(dataPoints, "", " "); err != nil { - w.Write([]byte(err.Error())) - } else { - w.Write(b) - } - }) + http.HandleFunc("/debug/opentsdb", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + dataPoints := (*backend).getDataPoints() + sort.Sort(byMetric(dataPoints)) + + if b, err := json.MarshalIndent(dataPoints, "", " "); err != nil { + w.Write([]byte(err.Error())) + } else { + w.Write(b) + } }) } diff --git a/go/stats/statsd/statsd.go b/go/stats/statsd/statsd.go index aa54ffab506..dd7f5287fb1 100644 --- a/go/stats/statsd/statsd.go +++ b/go/stats/statsd/statsd.go @@ -46,33 +46,51 @@ func makeLabels(labelNames []string, labelValsCombined string) []string { return tags } +func 
makeCommonTags(tags map[string]string) []string { + var commonTags []string + for k, v := range tags { + commonTag := fmt.Sprintf("%s:%s", k, v) + commonTags = append(commonTags, commonTag) + } + return commonTags +} + // Init initializes the statsd with the given namespace. func Init(namespace string) { servenv.OnRun(func() { - if *statsdAddress == "" { - return - } - statsdC, err := statsd.NewBuffered(*statsdAddress, 100) - if err != nil { - log.Errorf("Failed to create statsd client %v", err) - return - } - statsdC.Namespace = namespace + "." - sb.namespace = namespace - sb.statsdClient = statsdC - sb.sampleRate = *statsdSampleRate - stats.RegisterPushBackend("statsd", sb) - stats.RegisterTimerHook(func(statsName, name string, value int64, timings *stats.Timings) { - tags := makeLabels(strings.Split(timings.Label(), "."), name) - if err := statsdC.TimeInMilliseconds(statsName, float64(value), tags, sb.sampleRate); err != nil { - log.Errorf("Fail to TimeInMilliseconds %v: %v", statsName, err) - } - }) - stats.RegisterHistogramHook(func(name string, val int64) { - if err := statsdC.Histogram(name, float64(val), []string{}, sb.sampleRate); err != nil { - log.Errorf("Fail to Histogram for %v: %v", name, err) - } - }) + InitWithoutServenv(namespace) + }) +} + +// InitWithoutServenv initializes the statsd using the namespace but without servenv +func InitWithoutServenv(namespace string) { + if *statsdAddress == "" { + log.Info("statsdAddress is empty") + return + } + statsdC, err := statsd.NewBuffered(*statsdAddress, 100) + if err != nil { + log.Errorf("Failed to create statsd client %v", err) + return + } + statsdC.Namespace = namespace + "." 
+ if tags := stats.ParseCommonTags(*stats.CommonTags); len(tags) > 0 { + statsdC.Tags = makeCommonTags(tags) + } + sb.namespace = namespace + sb.statsdClient = statsdC + sb.sampleRate = *statsdSampleRate + stats.RegisterPushBackend("statsd", sb) + stats.RegisterTimerHook(func(statsName, name string, value int64, timings *stats.Timings) { + tags := makeLabels(strings.Split(timings.Label(), "."), name) + if err := statsdC.TimeInMilliseconds(statsName, float64(value), tags, sb.sampleRate); err != nil { + log.Errorf("Fail to TimeInMilliseconds %v: %v", statsName, err) + } + }) + stats.RegisterHistogramHook(func(statsName string, val int64) { + if err := statsdC.Histogram(statsName, float64(val), []string{}, sb.sampleRate); err != nil { + log.Errorf("Fail to Histogram for %v: %v", statsName, err) + } }) } @@ -181,7 +199,8 @@ func (sb StatsBackend) addExpVar(kv expvar.KeyValue) { } } } - case *stats.Rates, *stats.RatesFunc, *stats.String, *stats.StringFunc, *stats.StringMapFunc: + case *stats.Rates, *stats.RatesFunc, *stats.String, *stats.StringFunc, *stats.StringMapFunc, + stats.StringFunc, stats.StringMapFunc: // Silently ignore metrics that does not make sense to be exported to statsd default: log.Warningf("Silently ignore metrics with key %v [%T]", k, kv.Value) diff --git a/go/stats/statsd/statsd_test.go b/go/stats/statsd/statsd_test.go index a7ebce3769d..081f126240a 100644 --- a/go/stats/statsd/statsd_test.go +++ b/go/stats/statsd/statsd_test.go @@ -9,7 +9,7 @@ import ( "time" "github.com/DataDog/datadog-go/statsd" - "gotest.tools/assert" + "github.com/stretchr/testify/assert" "vitess.io/vitess/go/stats" ) @@ -446,3 +446,11 @@ func TestStatsdHistogram(t *testing.T) { t.Errorf("Stat %s not found...", name) } } + +func TestMakeCommonTags(t *testing.T) { + res1 := makeCommonTags(map[string]string{}) + assert.Equal(t, 0, len(res1)) + expected2 := []string{"a:b", "c:d"} + res2 := makeCommonTags(map[string]string{"a": "b", "c": "d"}) + assert.ElementsMatch(t, expected2, 
res2) +} diff --git a/go/sync2/atomic.go b/go/sync2/atomic.go index 2d8a532aac7..e974eae3960 100644 --- a/go/sync2/atomic.go +++ b/go/sync2/atomic.go @@ -17,6 +17,7 @@ limitations under the License. package sync2 import ( + "math" "sync" "sync/atomic" "time" @@ -82,6 +83,31 @@ func (i *AtomicInt64) CompareAndSwap(oldval, newval int64) (swapped bool) { return atomic.CompareAndSwapInt64(&i.int64, oldval, newval) } +// AtomicFloat64 is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Flat64 functions. +type AtomicFloat64 struct { + uint64 +} + +// NewAtomicFloat64 initializes a new AtomicFloat64 with a given value. +func NewAtomicFloat64(n float64) AtomicFloat64 { + return AtomicFloat64{math.Float64bits(n)} +} + +// Set atomically sets n as new value. +func (f *AtomicFloat64) Set(n float64) { + atomic.StoreUint64(&f.uint64, math.Float64bits(n)) +} + +// Get atomically returns the current value. +func (f *AtomicFloat64) Get() float64 { + return math.Float64frombits(atomic.LoadUint64(&f.uint64)) +} + +// CompareAndSwap automatically swaps the old with the new value. +func (f *AtomicFloat64) CompareAndSwap(oldval, newval float64) (swapped bool) { + return atomic.CompareAndSwapUint64(&f.uint64, math.Float64bits(oldval), math.Float64bits(newval)) +} + // AtomicDuration is a wrapper with a simpler interface around atomic.(Add|Store|Load|CompareAndSwap)Int64 functions. 
type AtomicDuration struct { int64 diff --git a/go/sync2/atomic_test.go b/go/sync2/atomic_test.go index 4590c5b92ee..2754c7bef85 100644 --- a/go/sync2/atomic_test.go +++ b/go/sync2/atomic_test.go @@ -57,6 +57,24 @@ func TestAtomicInt64(t *testing.T) { assert.Equal(t, int64(4), i.Get()) } +func TestAtomicFloat64(t *testing.T) { + i := NewAtomicFloat64(1.0) + assert.Equal(t, float64(1.0), i.Get()) + + i.Set(2.0) + assert.Equal(t, float64(2.0), i.Get()) + { + swapped := i.CompareAndSwap(2.0, 4.0) + assert.Equal(t, float64(4), i.Get()) + assert.Equal(t, true, swapped) + } + { + swapped := i.CompareAndSwap(2.0, 5.0) + assert.Equal(t, float64(4), i.Get()) + assert.Equal(t, false, swapped) + } +} + func TestAtomicDuration(t *testing.T) { d := NewAtomicDuration(time.Second) assert.Equal(t, time.Second, d.Get()) diff --git a/go/sync2/norace.go b/go/sync2/norace.go new file mode 100644 index 00000000000..4a97ec1b442 --- /dev/null +++ b/go/sync2/norace.go @@ -0,0 +1,6 @@ +// +build !race + +package sync2 + +// Race reports if the race detector is enabled. +const Race = false diff --git a/go/sync2/race.go b/go/sync2/race.go new file mode 100644 index 00000000000..b640e121e4a --- /dev/null +++ b/go/sync2/race.go @@ -0,0 +1,6 @@ +// +build race + +package sync2 + +// Race reports if the race detector is enabled. +const Race = true diff --git a/go/test/endtoend/backup/vtctlbackup/backup_utils.go b/go/test/endtoend/backup/vtctlbackup/backup_utils.go index 7ca79b2b896..896e6e4038d 100644 --- a/go/test/endtoend/backup/vtctlbackup/backup_utils.go +++ b/go/test/endtoend/backup/vtctlbackup/backup_utils.go @@ -392,6 +392,11 @@ func testRestoreOldMaster(t *testing.T, method restoreMethod) { // insert data on master, wait for replica to get it verifyInitialReplication(t) + // TODO: The following Sleep in introduced as it seems like the previous step doesn't fully complete, causing + // this test to be flaky. Sleep seems to solve the problem. 
Need to fix this in a better way and Wait for + // previous test to complete (suspicion: MySQL does not fully start) + time.Sleep(5 * time.Second) + // backup the replica err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) @@ -488,6 +493,11 @@ func terminatedRestore(t *testing.T) { // insert data on master, wait for replica to get it verifyInitialReplication(t) + // TODO: The following Sleep in introduced as it seems like the previous step doesn't fully complete, causing + // this test to be flaky. Sleep seems to solve the problem. Need to fix this in a better way and Wait for + // previous test to complete (suspicion: MySQL does not fully start) + time.Sleep(5 * time.Second) + // backup the replica err := localCluster.VtctlclientProcess.ExecuteCommand("Backup", replica1.Alias) require.Nil(t, err) diff --git a/go/test/endtoend/cluster/cluster_process.go b/go/test/endtoend/cluster/cluster_process.go index c92de019d90..f0e1a9a8380 100644 --- a/go/test/endtoend/cluster/cluster_process.go +++ b/go/test/endtoend/cluster/cluster_process.go @@ -48,6 +48,9 @@ var ( forceVTDATAROOT = flag.String("force-vtdataroot", "", "force path for VTDATAROOT, which may already be populated") forcePortStart = flag.Int("force-port-start", 0, "force assigning ports based on this seed") forceBaseTabletUID = flag.Int("force-base-tablet-uid", 0, "force assigning tablet ports based on this seed") + + // PerfTest controls whether to run the slower end-to-end tests that check the system's performance + PerfTest = flag.Bool("perf-test", false, "include end-to-end performance tests") ) // LocalProcessCluster Testcases need to use this to iniate a cluster diff --git a/go/test/endtoend/cluster/cluster_util.go b/go/test/endtoend/cluster/cluster_util.go index dd9790227aa..f1d9ea0ca04 100644 --- a/go/test/endtoend/cluster/cluster_util.go +++ b/go/test/endtoend/cluster/cluster_util.go @@ -59,6 +59,18 @@ func (tablet *Vttablet) Restart() error { return 
tablet.MysqlctldProcess.Start() } +// RestartOnlyTablet restarts vttablet, but not the underlying mysql instance +func (tablet *Vttablet) RestartOnlyTablet() error { + err := tablet.VttabletProcess.TearDown() + if err != nil { + return err + } + + tablet.VttabletProcess.ServingStatus = "SERVING" + + return tablet.VttabletProcess.Setup() +} + // ValidateTabletRestart restarts the tablet and validate error if there is any. func (tablet *Vttablet) ValidateTabletRestart(t *testing.T) { require.Nilf(t, tablet.Restart(), "tablet restart failed") diff --git a/go/test/endtoend/cluster/topo_process.go b/go/test/endtoend/cluster/topo_process.go index 01c90906482..0378b915330 100644 --- a/go/test/endtoend/cluster/topo_process.go +++ b/go/test/endtoend/cluster/topo_process.go @@ -17,6 +17,7 @@ limitations under the License. package cluster import ( + "encoding/json" "fmt" "io/ioutil" "net/http" @@ -89,7 +90,7 @@ func (topo *TopoProcess) SetupEtcd() (err error) { topo.proc.Env = append(topo.proc.Env, os.Environ()...) - log.Errorf("Starting etcd with command: %v", strings.Join(topo.proc.Args, " ")) + log.Infof("Starting etcd with command: %v", strings.Join(topo.proc.Args, " ")) err = topo.proc.Start() if err != nil { @@ -147,25 +148,65 @@ func (topo *TopoProcess) SetupZookeeper(cluster *LocalProcessCluster) (err error return } +// ConsulConfigs are the configurations that are added the config files which are used by consul +type ConsulConfigs struct { + Ports PortsInfo `json:"ports"` + DataDir string `json:"data_dir"` + LogFile string `json:"log_file"` +} + +// PortsInfo is the different ports used by consul +type PortsInfo struct { + DNS int `json:"dns"` + HTTP int `json:"http"` + SerfLan int `json:"serf_lan"` + SerfWan int `json:"serf_wan"` + Server int `json:"server"` +} + // SetupConsul spawns a new consul service and initializes it with the defaults. // The service is kept running in the background until TearDown() is called. 
func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { topo.VerifyURL = fmt.Sprintf("http://%s:%d/v1/kv/?keys", topo.Host, topo.Port) + _ = os.MkdirAll(topo.LogDirectory, os.ModePerm) + _ = os.MkdirAll(topo.DataDirectory, os.ModePerm) + configFile := path.Join(os.Getenv("VTDATAROOT"), "consul.json") - config := fmt.Sprintf(`{"ports":{"dns":%d,"http":%d,"serf_lan":%d,"serf_wan":%d}}`, - cluster.GetAndReservePort(), topo.Port, cluster.GetAndReservePort(), cluster.GetAndReservePort()) + logFile := path.Join(topo.LogDirectory, "/consul.log") + _, _ = os.Create(logFile) + + var config []byte + configs := ConsulConfigs{ + Ports: PortsInfo{ + DNS: cluster.GetAndReservePort(), + HTTP: topo.Port, + SerfLan: cluster.GetAndReservePort(), + SerfWan: cluster.GetAndReservePort(), + Server: cluster.GetAndReservePort(), + }, + DataDir: topo.DataDirectory, + LogFile: logFile, + } + config, err = json.Marshal(configs) + if err != nil { + log.Error(err.Error()) + return + } - err = ioutil.WriteFile(configFile, []byte(config), 0666) + err = ioutil.WriteFile(configFile, config, 0666) if err != nil { return } topo.proc = exec.Command( topo.Binary, "agent", - "-dev", + "-server", + "-ui", + "-bootstrap-expect", "1", + "-bind", "127.0.0.1", "-config-file", configFile, ) @@ -174,7 +215,7 @@ func (topo *TopoProcess) SetupConsul(cluster *LocalProcessCluster) (err error) { topo.proc.Env = append(topo.proc.Env, os.Environ()...) 
- log.Infof("Starting consul with args %v", strings.Join(topo.proc.Args, " ")) + log.Errorf("Starting consul with args %v", strings.Join(topo.proc.Args, " ")) err = topo.proc.Start() if err != nil { return @@ -225,7 +266,9 @@ func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoo return nil } - topo.removeTopoDirectories(Cell) + if !(*keepData || keepdata) { + topo.removeTopoDirectories(Cell) + } // Attempt graceful shutdown with SIGTERM first _ = topo.proc.Process.Signal(syscall.SIGTERM) @@ -233,8 +276,8 @@ func (topo *TopoProcess) TearDown(Cell string, originalVtRoot string, currentRoo if !(*keepData || keepdata) { _ = os.RemoveAll(topo.DataDirectory) _ = os.RemoveAll(currentRoot) + _ = os.Setenv("VTDATAROOT", originalVtRoot) } - _ = os.Setenv("VTDATAROOT", originalVtRoot) select { case <-topo.exit: diff --git a/go/test/endtoend/cluster/vtctl_process.go b/go/test/endtoend/cluster/vtctl_process.go index b2dfe753f37..b27cdea9911 100644 --- a/go/test/endtoend/cluster/vtctl_process.go +++ b/go/test/endtoend/cluster/vtctl_process.go @@ -58,19 +58,11 @@ func (vtctl *VtctlProcess) AddCellInfo(Cell string) (err error) { // CreateKeyspace executes vtctl command to create keyspace func (vtctl *VtctlProcess) CreateKeyspace(keyspace string) (err error) { - tmpProcess := exec.Command( - vtctl.Binary, - "-topo_implementation", vtctl.TopoImplementation, - "-topo_global_server_address", vtctl.TopoGlobalAddress, - "-topo_global_root", vtctl.TopoGlobalRoot, - ) - if *isCoverage { - tmpProcess.Args = append(tmpProcess.Args, "-test.coverprofile="+getCoveragePath("vtctl-create-ks.out")) + output, err := vtctl.ExecuteCommandWithOutput("CreateKeyspace", keyspace) + if err != nil { + log.Errorf("CreateKeyspace returned err: %s, output: %s", err, output) } - tmpProcess.Args = append(tmpProcess.Args, - "CreateKeyspace", keyspace) - log.Infof("Running CreateKeyspace with command: %v", strings.Join(tmpProcess.Args, " ")) - return tmpProcess.Run() + return err } // 
ExecuteCommandWithOutput executes any vtctlclient command and returns output diff --git a/go/test/endtoend/cluster/vtctlclient_process.go b/go/test/endtoend/cluster/vtctlclient_process.go index 99b68b7cba4..beaa704d5ef 100644 --- a/go/test/endtoend/cluster/vtctlclient_process.go +++ b/go/test/endtoend/cluster/vtctlclient_process.go @@ -36,6 +36,12 @@ type VtctlClientProcess struct { ZoneName string } +// VtctlClientParams encapsulated params to provide if non-default +type VtctlClientParams struct { + DDLStrategy string + SkipPreflight bool +} + // InitShardMaster executes vtctlclient command to make one of tablet as master func (vtctlclient *VtctlClientProcess) InitShardMaster(Keyspace string, Shard string, Cell string, TabletUID int) (err error) { output, err := vtctlclient.ExecuteCommandWithOutput( @@ -50,13 +56,16 @@ func (vtctlclient *VtctlClientProcess) InitShardMaster(Keyspace string, Shard st } // ApplySchemaWithOutput applies SQL schema to the keyspace -func (vtctlclient *VtctlClientProcess) ApplySchemaWithOutput(Keyspace string, SQL string, ddlStrategy string) (result string, err error) { +func (vtctlclient *VtctlClientProcess) ApplySchemaWithOutput(Keyspace string, SQL string, params VtctlClientParams) (result string, err error) { args := []string{ "ApplySchema", "-sql", SQL, } - if ddlStrategy != "" { - args = append(args, "-ddl_strategy", ddlStrategy) + if params.DDLStrategy != "" { + args = append(args, "-ddl_strategy", params.DDLStrategy) + } + if params.SkipPreflight { + args = append(args, "-skip_preflight") } args = append(args, Keyspace) return vtctlclient.ExecuteCommandWithOutput(args...) 
@@ -64,7 +73,7 @@ func (vtctlclient *VtctlClientProcess) ApplySchemaWithOutput(Keyspace string, SQ // ApplySchema applies SQL schema to the keyspace func (vtctlclient *VtctlClientProcess) ApplySchema(Keyspace string, SQL string) error { - message, err := vtctlclient.ApplySchemaWithOutput(Keyspace, SQL, "direct") + message, err := vtctlclient.ApplySchemaWithOutput(Keyspace, SQL, VtctlClientParams{DDLStrategy: "direct"}) return vterrors.Wrap(err, message) } diff --git a/go/test/endtoend/cluster/vtgate_process.go b/go/test/endtoend/cluster/vtgate_process.go index ed2c287304d..c7fc39a77d5 100644 --- a/go/test/endtoend/cluster/vtgate_process.go +++ b/go/test/endtoend/cluster/vtgate_process.go @@ -203,7 +203,7 @@ func (vtgate *VtgateProcess) TearDown() error { vtgate.proc = nil return nil - case <-time.After(10 * time.Second): + case <-time.After(30 * time.Second): vtgate.proc.Process.Kill() vtgate.proc = nil return <-vtgate.exit diff --git a/go/test/endtoend/cluster/vttablet_process.go b/go/test/endtoend/cluster/vttablet_process.go index b6335d97762..967aeadfcb9 100644 --- a/go/test/endtoend/cluster/vttablet_process.go +++ b/go/test/endtoend/cluster/vttablet_process.go @@ -18,9 +18,11 @@ limitations under the License. 
package cluster import ( + "bufio" "context" "encoding/json" "fmt" + "io" "io/ioutil" "net/http" "os" @@ -29,6 +31,7 @@ import ( "reflect" "strings" "syscall" + "testing" "time" "vitess.io/vitess/go/mysql" @@ -104,6 +107,9 @@ func (vttablet *VttabletProcess) Setup() (err error) { if *isCoverage { vttablet.proc.Args = append(vttablet.proc.Args, "-test.coverprofile="+getCoveragePath("vttablet.out")) } + if *PerfTest { + vttablet.proc.Args = append(vttablet.proc.Args, "-pprof", fmt.Sprintf("cpu,waitSig,path=vttablet_pprof_%s", vttablet.Name)) + } if vttablet.SupportsBackup { vttablet.proc.Args = append(vttablet.proc.Args, "-restore_from_backup") @@ -113,8 +119,8 @@ func (vttablet *VttabletProcess) Setup() (err error) { } vttablet.proc.Args = append(vttablet.proc.Args, vttablet.ExtraArgs...) - - errFile, _ := os.Create(path.Join(vttablet.LogDir, vttablet.TabletPath+"-vttablet-stderr.txt")) + fname := path.Join(vttablet.LogDir, vttablet.TabletPath+"-vttablet-stderr.txt") + errFile, _ := os.Create(fname) vttablet.proc.Stderr = errFile vttablet.proc.Env = append(vttablet.proc.Env, os.Environ()...) 
@@ -135,6 +141,10 @@ func (vttablet *VttabletProcess) Setup() (err error) { if vttablet.ServingStatus != "" { if err = vttablet.WaitForTabletType(vttablet.ServingStatus); err != nil { + errFileContent, _ := ioutil.ReadFile(fname) + if errFileContent != nil { + log.Infof("vttablet error:\n%s\n", string(errFileContent)) + } return fmt.Errorf("process '%s' timed out after 10s (err: %s)", vttablet.Name, err) } } @@ -295,8 +305,11 @@ func (vttablet *VttabletProcess) TearDown() error { return nil case <-time.After(10 * time.Second): - vttablet.proc.Process.Kill() - vttablet.proc = nil + proc := vttablet.proc + if proc != nil { + vttablet.proc.Process.Kill() + vttablet.proc = nil + } return <-vttablet.exit } } @@ -314,11 +327,15 @@ func (vttablet *VttabletProcess) QueryTablet(query string, keyspace string, useD keyspace = "" } dbParams := NewConnParams(vttablet.DbPort, vttablet.DbPassword, path.Join(vttablet.Directory, "mysql.sock"), keyspace) - return executeQuery(dbParams, query) + conn, err := vttablet.conn(&dbParams) + if err != nil { + return nil, err + } + defer conn.Close() + return executeQuery(conn, query) } -// QueryTabletWithDB lets you execute query on a specific DB in this tablet and get the result -func (vttablet *VttabletProcess) QueryTabletWithDB(query string, dbname string) (*sqltypes.Result, error) { +func (vttablet *VttabletProcess) defaultConn(dbname string) (*mysql.Conn, error) { dbParams := mysql.ConnParams{ Uname: "vt_dba", UnixSocket: path.Join(vttablet.Directory, "mysql.sock"), @@ -327,18 +344,26 @@ func (vttablet *VttabletProcess) QueryTabletWithDB(query string, dbname string) if vttablet.DbPassword != "" { dbParams.Pass = vttablet.DbPassword } - return executeQuery(dbParams, query) + return vttablet.conn(&dbParams) } -func executeQuery(dbParams mysql.ConnParams, query string) (*sqltypes.Result, error) { +func (vttablet *VttabletProcess) conn(dbParams *mysql.ConnParams) (*mysql.Conn, error) { ctx := context.Background() - dbConn, err := 
mysql.Connect(ctx, &dbParams) + return mysql.Connect(ctx, dbParams) +} + +// QueryTabletWithDB lets you execute query on a specific DB in this tablet and get the result +func (vttablet *VttabletProcess) QueryTabletWithDB(query string, dbname string) (*sqltypes.Result, error) { + conn, err := vttablet.defaultConn(dbname) if err != nil { return nil, err } - defer dbConn.Close() - qr, err := dbConn.ExecuteFetch(query, 10000, true) - return qr, err + defer conn.Close() + return executeQuery(conn, query) +} + +func executeQuery(dbConn *mysql.Conn, query string) (*sqltypes.Result, error) { + return dbConn.ExecuteFetch(query, 10000, true) } // GetDBVar returns first matching database variable's value @@ -362,6 +387,91 @@ func (vttablet *VttabletProcess) getDBSystemValues(placeholder string, value str return "", nil } +// ToggleProfiling enables or disables the configured CPU profiler on this vttablet +func (vttablet *VttabletProcess) ToggleProfiling() error { + return vttablet.proc.Process.Signal(syscall.SIGUSR1) +} + +// WaitForVReplicationToCatchup waits for "workflow" to finish copying +func (vttablet *VttabletProcess) WaitForVReplicationToCatchup(t testing.TB, workflow, database string, duration time.Duration) { + queries := [3]string{ + fmt.Sprintf(`select count(*) from _vt.vreplication where workflow = "%s" and db_name = "%s" and pos = ''`, workflow, database), + "select count(*) from information_schema.tables where table_schema='_vt' and table_name='copy_state' limit 1;", + fmt.Sprintf(`select count(*) from _vt.copy_state where vrepl_id in (select id from _vt.vreplication where workflow = "%s" and db_name = "%s" )`, workflow, database), + } + results := [3]string{"[INT64(0)]", "[INT64(1)]", "[INT64(0)]"} + + conn, err := vttablet.defaultConn("") + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + var lastChecked time.Time + for ind, query := range queries { + waitDuration := 500 * time.Millisecond + for duration > 0 { + log.Infof("Executing query %s on 
%s", query, vttablet.Name) + lastChecked = time.Now() + qr, err := conn.ExecuteFetch(query, 1000, true) + if err != nil { + t.Fatal(err) + } + if qr != nil && qr.Rows != nil && len(qr.Rows) > 0 && fmt.Sprintf("%v", qr.Rows[0]) == string(results[ind]) { + break + } else { + log.Infof("In WaitForVReplicationToCatchup: %s %+v", query, qr.Rows) + } + time.Sleep(waitDuration) + duration -= waitDuration + } + if duration <= 0 { + t.Fatalf("WaitForVReplicationToCatchup timed out for workflow %s, keyspace %s", workflow, database) + } + } + log.Infof("WaitForVReplicationToCatchup succeeded at %v", lastChecked) +} + +// BulkLoad performs a bulk load of rows into a given vttablet. +func (vttablet *VttabletProcess) BulkLoad(t testing.TB, db, table string, bulkInsert func(io.Writer)) { + tmpbulk, err := ioutil.TempFile(path.Join(vttablet.Directory, "tmp"), "bulk_load") + if err != nil { + t.Fatalf("failed to create tmp file for loading: %v", err) + } + defer os.Remove(tmpbulk.Name()) + + log.Infof("create temporary file for bulk loading %q", tmpbulk.Name()) + bufStart := time.Now() + + bulkBuffer := bufio.NewWriter(tmpbulk) + bulkInsert(bulkBuffer) + bulkBuffer.Flush() + + pos, _ := tmpbulk.Seek(0, 1) + bufFinish := time.Now() + log.Infof("bulk loading %d bytes from %q...", pos, tmpbulk.Name()) + + if err := tmpbulk.Close(); err != nil { + t.Fatal(err) + } + + conn, err := vttablet.defaultConn("vt_" + db) + if err != nil { + t.Fatal(err) + } + defer conn.Close() + + query := fmt.Sprintf("LOAD DATA INFILE '%s' INTO TABLE `%s` FIELDS TERMINATED BY ',' ENCLOSED BY '\"'", tmpbulk.Name(), table) + _, err = conn.ExecuteFetch(query, 1, false) + if err != nil { + t.Fatal(err) + } + + end := time.Now() + log.Infof("bulk insert successful (write tmp file = %v, mysql bulk load = %v, total = %v", + bufFinish.Sub(bufStart), end.Sub(bufFinish), end.Sub(bufStart)) +} + // VttabletProcessInstance returns a VttabletProcess handle for vttablet process // configured with the given Config. 
// The process must be manually started by calling setup() diff --git a/go/test/endtoend/docker/vttestserver.go b/go/test/endtoend/docker/vttestserver.go new file mode 100644 index 00000000000..e6bc93d4e6c --- /dev/null +++ b/go/test/endtoend/docker/vttestserver.go @@ -0,0 +1,165 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "encoding/json" + "fmt" + "os" + "os/exec" + "path" + "strconv" + "strings" + "time" + + "vitess.io/vitess/go/vt/log" +) + +const ( + vttestserverMysql57image = "vttestserver-e2etest/mysql57" + vttestserverMysql80image = "vttestserver-e2etest/mysql80" +) + +type vttestserver struct { + dockerImage string + keyspaces []string + numShards []int + mysqlMaxConnecetions int + port int +} + +func newVttestserver(dockerImage string, keyspaces []string, numShards []int, mysqlMaxConnections, port int) *vttestserver { + return &vttestserver{ + dockerImage: dockerImage, + keyspaces: keyspaces, + numShards: numShards, + mysqlMaxConnecetions: mysqlMaxConnections, + port: port, + } +} + +func (v *vttestserver) teardown() { + cmd := exec.Command("docker", "rm", "--force", "vttestserver-end2end-test") + err := cmd.Run() + if err != nil { + log.Errorf("docker teardown failed :- %s", err.Error()) + } +} + +// startDockerImage starts the docker image for the vttestserver +func (v *vttestserver) startDockerImage() error { + cmd := exec.Command("docker", "run") + cmd.Args = append(cmd.Args, 
"--name=vttestserver-end2end-test") + cmd.Args = append(cmd.Args, "-p", fmt.Sprintf("%d:33577", v.port)) + cmd.Args = append(cmd.Args, "-e", "PORT=33574") + cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("KEYSPACES=%s", strings.Join(v.keyspaces, ","))) + cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("NUM_SHARDS=%s", strings.Join(convertToStringSlice(v.numShards), ","))) + cmd.Args = append(cmd.Args, "-e", "MYSQL_BIND_HOST=0.0.0.0") + cmd.Args = append(cmd.Args, "-e", fmt.Sprintf("MYSQL_MAX_CONNECTIONS=%d", v.mysqlMaxConnecetions)) + cmd.Args = append(cmd.Args, "--health-cmd", "mysqladmin ping -h127.0.0.1 -P33577") + cmd.Args = append(cmd.Args, "--health-interval=5s") + cmd.Args = append(cmd.Args, "--health-timeout=2s") + cmd.Args = append(cmd.Args, "--health-retries=5") + cmd.Args = append(cmd.Args, v.dockerImage) + + err := cmd.Start() + if err != nil { + return err + } + return nil +} + +// dockerStatus is a struct used to unmarshal json output from `docker inspect` +type dockerStatus struct { + State struct { + Health struct { + Status string + } + } +} + +// waitUntilDockerHealthy waits until the docker image is healthy. 
It takes in as argument the amount of seconds to wait before timeout +func (v *vttestserver) waitUntilDockerHealthy(timeoutDelay int) error { + timeOut := time.After(time.Duration(timeoutDelay) * time.Second) + + for { + select { + case <-timeOut: + // return error due to timeout + return fmt.Errorf("timed out waiting for docker image to start") + case <-time.After(time.Second): + cmd := exec.Command("docker", "inspect", "vttestserver-end2end-test") + out, err := cmd.Output() + if err != nil { + return err + } + var x []dockerStatus + err = json.Unmarshal(out, &x) + if err != nil { + return err + } + if len(x) > 0 { + status := x[0].State.Health.Status + if status == "healthy" { + return nil + } + } + } + } +} + +// convertToStringSlice converts an integer slice to string slice +func convertToStringSlice(intSlice []int) []string { + var stringSlice []string + for _, val := range intSlice { + str := strconv.Itoa(val) + stringSlice = append(stringSlice, str) + } + return stringSlice +} + +//makeVttestserverDockerImages creates the vttestserver docker images for both MySQL57 and MySQL80 +func makeVttestserverDockerImages() error { + mainVitessPath := path.Join(os.Getenv("PWD"), "../../../..") + dockerFilePath := path.Join(mainVitessPath, "docker/vttestserver/Dockerfile.mysql57") + cmd57 := exec.Command("docker", "build", "-f", dockerFilePath, "-t", vttestserverMysql57image, ".") + cmd57.Dir = mainVitessPath + err := cmd57.Start() + if err != nil { + return err + } + + dockerFilePath = path.Join(mainVitessPath, "docker/vttestserver/Dockerfile.mysql80") + cmd80 := exec.Command("docker", "build", "-f", dockerFilePath, "-t", vttestserverMysql80image, ".") + cmd80.Dir = mainVitessPath + err = cmd80.Start() + if err != nil { + return err + } + + err = cmd57.Wait() + if err != nil { + return err + } + + err = cmd80.Wait() + if err != nil { + return err + } + + return nil +} diff --git a/go/test/endtoend/docker/vttestserver_test.go 
b/go/test/endtoend/docker/vttestserver_test.go new file mode 100644 index 00000000000..797e8f2e5c7 --- /dev/null +++ b/go/test/endtoend/docker/vttestserver_test.go @@ -0,0 +1,196 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "os" + "testing" + + "github.com/google/go-cmp/cmp" + + "vitess.io/vitess/go/sqltypes" + + "vitess.io/vitess/go/mysql" + + "github.com/stretchr/testify/require" +) + +func TestMain(m *testing.M) { + exitCode := func() int { + err := makeVttestserverDockerImages() + if err != nil { + return 1 + } + return m.Run() + }() + os.Exit(exitCode) +} + +func TestUnsharded(t *testing.T) { + dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} + for _, image := range dockerImages { + t.Run(image, func(t *testing.T) { + vtest := newVttestserver(image, []string{"unsharded_ks"}, []int{1}, 1000, 33577) + err := vtest.startDockerImage() + require.NoError(t, err) + defer vtest.teardown() + + // wait for the docker to be setup + err = vtest.waitUntilDockerHealthy(10) + require.NoError(t, err) + + ctx := context.Background() + vttestParams := mysql.ConnParams{ + Host: "localhost", + Port: vtest.port, + } + conn, err := mysql.Connect(ctx, &vttestParams) + require.NoError(t, err) + defer conn.Close() + assertMatches(t, conn, "show databases", `[[VARCHAR("unsharded_ks")] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] 
[VARCHAR("performance_schema")]]`) + _, err = execute(t, conn, "create table unsharded_ks.t1(id int)") + require.NoError(t, err) + _, err = execute(t, conn, "insert into unsharded_ks.t1(id) values (10),(20),(30)") + require.NoError(t, err) + assertMatches(t, conn, "select * from unsharded_ks.t1", `[[INT32(10)] [INT32(20)] [INT32(30)]]`) + }) + } +} + +func TestSharded(t *testing.T) { + dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} + for _, image := range dockerImages { + t.Run(image, func(t *testing.T) { + vtest := newVttestserver(image, []string{"ks"}, []int{2}, 1000, 33577) + err := vtest.startDockerImage() + require.NoError(t, err) + defer vtest.teardown() + + // wait for the docker to be setup + err = vtest.waitUntilDockerHealthy(10) + require.NoError(t, err) + + ctx := context.Background() + vttestParams := mysql.ConnParams{ + Host: "localhost", + Port: vtest.port, + } + conn, err := mysql.Connect(ctx, &vttestParams) + require.NoError(t, err) + defer conn.Close() + assertMatches(t, conn, "show databases", `[[VARCHAR("ks")] [VARCHAR("information_schema")] [VARCHAR("mysql")] [VARCHAR("sys")] [VARCHAR("performance_schema")]]`) + _, err = execute(t, conn, "create table ks.t1(id int)") + require.NoError(t, err) + _, err = execute(t, conn, "alter vschema on ks.t1 add vindex `binary_md5`(id) using `binary_md5`") + require.NoError(t, err) + _, err = execute(t, conn, "insert into ks.t1(id) values (10),(20),(30)") + require.NoError(t, err) + assertMatches(t, conn, "select id from ks.t1 order by id", `[[INT32(10)] [INT32(20)] [INT32(30)]]`) + }) + } +} + +func TestMysqlMaxCons(t *testing.T) { + dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} + for _, image := range dockerImages { + t.Run(image, func(t *testing.T) { + vtest := newVttestserver(image, []string{"ks"}, []int{2}, 100000, 33577) + err := vtest.startDockerImage() + require.NoError(t, err) + defer vtest.teardown() + + // wait for the docker to be setup + 
err = vtest.waitUntilDockerHealthy(10) + require.NoError(t, err) + + ctx := context.Background() + vttestParams := mysql.ConnParams{ + Host: "localhost", + Port: vtest.port, + } + conn, err := mysql.Connect(ctx, &vttestParams) + require.NoError(t, err) + defer conn.Close() + assertMatches(t, conn, "select @@max_connections", `[[UINT64(100000)]]`) + }) + } +} + +func TestLargeNumberOfKeyspaces(t *testing.T) { + dockerImages := []string{vttestserverMysql57image, vttestserverMysql80image} + for _, image := range dockerImages { + t.Run(image, func(t *testing.T) { + var keyspaces []string + var numShards []int + for i := 0; i < 100; i++ { + keyspaces = append(keyspaces, fmt.Sprintf("unsharded_ks%d", i)) + numShards = append(numShards, 1) + } + + vtest := newVttestserver(image, keyspaces, numShards, 100000, 33577) + err := vtest.startDockerImage() + require.NoError(t, err) + defer vtest.teardown() + + // wait for the docker to be setup + err = vtest.waitUntilDockerHealthy(15) + require.NoError(t, err) + + ctx := context.Background() + vttestParams := mysql.ConnParams{ + Host: "localhost", + Port: vtest.port, + } + conn, err := mysql.Connect(ctx, &vttestParams) + require.NoError(t, err) + defer conn.Close() + + // assert that all the keyspaces are correctly setup + for _, keyspace := range keyspaces { + _, err = execute(t, conn, "create table "+keyspace+".t1(id int)") + require.NoError(t, err) + _, err = execute(t, conn, "insert into "+keyspace+".t1(id) values (10),(20),(30)") + require.NoError(t, err) + assertMatches(t, conn, "select * from "+keyspace+".t1", `[[INT32(10)] [INT32(20)] [INT32(30)]]`) + } + }) + } +} + +func execute(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Result, error) { + t.Helper() + return conn.ExecuteFetch(query, 1000, true) +} + +func checkedExec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err) + return qr +} + +func 
assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := checkedExec(t, conn, query) + got := fmt.Sprintf("%v", qr.Rows) + diff := cmp.Diff(expected, got) + if diff != "" { + t.Errorf("Query: %s (-want +got):\n%s", query, diff) + } +} diff --git a/go/test/endtoend/messaging/message_test.go b/go/test/endtoend/messaging/message_test.go index e69f79d65ce..90d6326caeb 100644 --- a/go/test/endtoend/messaging/message_test.go +++ b/go/test/endtoend/messaging/message_test.go @@ -26,6 +26,8 @@ import ( "testing" "time" + "vitess.io/vitess/go/test/utils" + "vitess.io/vitess/go/vt/vtgate/evalengine" "github.com/stretchr/testify/assert" @@ -94,7 +96,7 @@ func TestMessage(t *testing.T) { } } require.NoError(t, err) - assert.Equal(t, wantFields, gotFields) + utils.MustMatch(t, wantFields, gotFields) exec(t, conn, "insert into vitess_message(id, message) values(1, 'hello world')") @@ -104,14 +106,14 @@ func TestMessage(t *testing.T) { // Consume first message. start := time.Now().UnixNano() - got, err := streamConn.FetchNext() + got, err := streamConn.FetchNext(nil) require.NoError(t, err) want := []sqltypes.Value{ sqltypes.NewInt64(1), sqltypes.NewVarChar("hello world"), } - assert.Equal(t, want, got) + utils.MustMatch(t, want, got) qr := exec(t, conn, "select time_next, epoch from vitess_message where id = 1") next, epoch := getTimeEpoch(qr) @@ -131,7 +133,7 @@ func TestMessage(t *testing.T) { } // Consume the resend. 
- _, err = streamConn.FetchNext() + _, err = streamConn.FetchNext(nil) require.NoError(t, err) qr = exec(t, conn, "select time_next, epoch from vitess_message where id = 1") next, epoch = getTimeEpoch(qr) @@ -215,18 +217,18 @@ func TestThreeColMessage(t *testing.T) { } } require.NoError(t, err) - assert.Equal(t, wantFields, gotFields) + utils.MustMatch(t, wantFields, gotFields) exec(t, conn, "insert into vitess_message3(id, msg1, msg2) values(1, 'hello world', 3)") - got, err := streamConn.FetchNext() + got, err := streamConn.FetchNext(nil) require.NoError(t, err) want := []sqltypes.Value{ sqltypes.NewInt64(1), sqltypes.NewVarChar("hello world"), sqltypes.NewInt64(3), } - assert.Equal(t, want, got) + utils.MustMatch(t, want, got) // Verify Ack. qr := exec(t, conn, "update vitess_message3 set time_acked = 123, time_next = null where id = 1 and time_acked is null") diff --git a/go/test/endtoend/mysqlserver/main_test.go b/go/test/endtoend/mysqlserver/main_test.go index 16afa7f96b2..83ce6eda40b 100644 --- a/go/test/endtoend/mysqlserver/main_test.go +++ b/go/test/endtoend/mysqlserver/main_test.go @@ -41,7 +41,14 @@ var ( keyspace_id bigint(20) unsigned NOT NULL, data longblob, primary key (id) - ) Engine=InnoDB` + ) Engine=InnoDB; +` + createProcSQL = `use vt_test_keyspace; +CREATE PROCEDURE testing() +BEGIN + delete from vt_insert_test; +END; +` ) func TestMain(m *testing.M) { @@ -98,6 +105,7 @@ func TestMain(m *testing.M) { "-mysql_auth_server_impl", "static", "-mysql_auth_server_static_file", clusterInstance.TmpDirectory + mysqlAuthServerStatic, "-mysql_server_version", "8.0.16-7", + "-warn_sharded_only=true", } clusterInstance.VtTabletExtraArgs = []string{ @@ -126,6 +134,11 @@ func TestMain(m *testing.M) { Pass: "testpassword1", } + masterProcess := clusterInstance.Keyspaces[0].Shards[0].MasterTablet().VttabletProcess + if _, err := masterProcess.QueryTablet(createProcSQL, keyspaceName, false); err != nil { + return 1, err + } + return m.Run(), nil }() if err != nil 
{ diff --git a/go/test/endtoend/mysqlserver/mysql_server_test.go b/go/test/endtoend/mysqlserver/mysql_server_test.go index 2c8172c36c6..9fb6344778b 100644 --- a/go/test/endtoend/mysqlserver/mysql_server_test.go +++ b/go/test/endtoend/mysqlserver/mysql_server_test.go @@ -142,40 +142,39 @@ func TestWarnings(t *testing.T) { ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) - require.Nilf(t, err, "unable to connect mysql: %v", err) + require.NoError(t, err) defer conn.Close() - // validate warning with invalid_field error as warning - qr, err := conn.ExecuteFetch("SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS */ invalid_field from vt_insert_test;", 1, false) - require.Nilf(t, err, "select error : %v", err) + // using CALL will produce a warning saying this only works in unsharded + qr, err := conn.ExecuteFetch("CALL testing()", 1, false) + require.NoError(t, err) assert.Empty(t, qr.Rows, "number of rows") qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) - require.Nilf(t, err, "SHOW WARNINGS; execution failed: %v", err) + require.NoError(t, err, "SHOW WARNINGS") assert.EqualValues(t, 1, len(qr.Rows), "number of rows") assert.Contains(t, qr.Rows[0][0].String(), "VARCHAR(\"Warning\")", qr.Rows) - assert.Contains(t, qr.Rows[0][1].String(), "UINT16(1054)", qr.Rows) - assert.Contains(t, qr.Rows[0][2].String(), "Unknown column", qr.Rows) + assert.Contains(t, qr.Rows[0][1].String(), "UINT16(1235)", qr.Rows) + assert.Contains(t, qr.Rows[0][2].String(), "'CALL' not supported in sharded mode", qr.Rows) - // validate warning with query_timeout error as warning - qr, err = conn.ExecuteFetch("SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS QUERY_TIMEOUT_MS=1 */ sleep(1) from vt_insert_test;", 1, false) - require.Nilf(t, err, "insertion error : %v", err) - assert.Empty(t, qr.Rows, "number of rows") + // validate with 0 warnings + _, err = conn.ExecuteFetch("SELECT 1 from vt_insert_test limit 1", 1, false) + require.NoError(t, err) qr, err = conn.ExecuteFetch("SHOW 
WARNINGS;", 1, false) - require.Nilf(t, err, "SHOW WARNINGS; execution failed: %v", err) - assert.EqualValues(t, 1, len(qr.Rows), "number of rows") - assert.Contains(t, qr.Rows[0][0].String(), "VARCHAR(\"Warning\")", qr.Rows) - assert.Contains(t, qr.Rows[0][1].String(), "UINT16(1317)", qr.Rows) - assert.Contains(t, qr.Rows[0][2].String(), "context deadline exceeded", qr.Rows) + require.NoError(t, err) + assert.Empty(t, qr.Rows) - // validate with 0 warnings + // verify that show warnings are empty if another statement is run before calling it + qr, err = conn.ExecuteFetch("CALL testing()", 1, false) + require.NoError(t, err) + assert.Empty(t, qr.Rows, "number of rows") _, err = conn.ExecuteFetch("SELECT 1 from vt_insert_test limit 1", 1, false) - require.Nilf(t, err, "select error: %v", err) + require.NoError(t, err) qr, err = conn.ExecuteFetch("SHOW WARNINGS;", 1, false) - require.Nilf(t, err, "SHOW WARNINGS; execution failed: %v", err) - assert.Empty(t, len(qr.Rows), "number of rows") + require.NoError(t, err) + assert.Empty(t, qr.Rows) } // TestSelectWithUnauthorizedUser verifies that an unauthorized user diff --git a/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go b/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go index dd37ba18cee..0090dbbb78d 100644 --- a/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go +++ b/go/test/endtoend/onlineddl/declarative/onlineddl_declarative_test.go @@ -415,17 +415,17 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, ddlStrategy) + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) fmt.Println("# Generated UUID (for debug purposes):") fmt.Printf("<%s>\n", uuid) 
- strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) assert.NoError(t, err) - if !strategy.IsDirect() { + if !strategySetting.Strategy.IsDirect() { time.Sleep(time.Second * 20) } diff --git a/go/test/endtoend/onlineddl/exec_util.go b/go/test/endtoend/onlineddl/exec_util.go new file mode 100644 index 00000000000..ddbcbf92056 --- /dev/null +++ b/go/test/endtoend/onlineddl/exec_util.go @@ -0,0 +1,67 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package onlineddl + +import ( + "fmt" + "io/ioutil" + "os/exec" + "path" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" +) + +// CreateTempScript creates a script in the temporary directory with given content +func CreateTempScript(t *testing.T, content string) (fileName string) { + f, err := ioutil.TempFile("", "onlineddl-test-") + require.NoError(t, err) + + _, err = f.WriteString(content) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + return f.Name() +} + +// MysqlClientExecFile runs a file through the mysql client +func MysqlClientExecFile(t *testing.T, mysqlParams *mysql.ConnParams, testDataPath, testName string, fileName string) (output string) { + t.Helper() + + bashPath, err := exec.LookPath("bash") + require.NoError(t, err) + mysqlPath, err := exec.LookPath("mysql") + require.NoError(t, err) + + filePath := fileName + if !filepath.IsAbs(fileName) { + filePath, _ = filepath.Abs(path.Join(testDataPath, testName, fileName)) + } + bashCommand := fmt.Sprintf(`%s -u%s --socket=%s --database=%s -s -s < %s 2> /tmp/error.log`, mysqlPath, mysqlParams.Uname, mysqlParams.UnixSocket, mysqlParams.DbName, filePath) + cmd, err := exec.Command( + bashPath, + "-c", + bashCommand, + ).Output() + + require.NoError(t, err) + return string(cmd) +} diff --git a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go index 67fd294d700..50459fec41d 100644 --- a/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go +++ b/go/test/endtoend/onlineddl/ghost/onlineddl_ghost_test.go @@ -50,6 +50,13 @@ var ( msg varchar(64), PRIMARY KEY (id) ) ENGINE=InnoDB;` + insertStatements = []string{ + `insert into %s (id, msg) values (3, 'three')`, + `insert into %s (id, msg) values (5, 'five')`, + `insert into %s (id, msg) values (7, 'seven')`, + `insert into %s (id, msg) values (11, 'eleven')`, + `insert into %s (id, msg) values (13, 
'thirteen')`, + } // To verify non online-DDL behavior alterTableNormalStatement = ` ALTER TABLE %s @@ -79,6 +86,10 @@ var ( online_ddl_create_col INT NOT NULL, PRIMARY KEY (id) ) ENGINE=InnoDB;` + noPKCreateTableStatement = ` + CREATE TABLE %s ( + online_ddl_create_col INT NOT NULL + ) ENGINE=InnoDB;` onlineDDLDropTableStatement = ` DROP TABLE %s` onlineDDLDropTableIfExistsStatement = ` @@ -147,9 +158,15 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "-schema_change_dir", schemaChangeDirectory, "-schema_change_controller", "local", - "-schema_change_check_interval", "1"} + "-schema_change_check_interval", "1", + "-online_ddl_check_interval", "3s", + } clusterInstance.VtTabletExtraArgs = []string{ + "-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", "-migration_check_interval", "5s", "-gh-ost-path", os.Getenv("VITESS_ENDTOEND_GH_OST_PATH"), // leave env variable empty/unset to get the default behavior. Override in Mac. 
} @@ -209,6 +226,25 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + + var totalRowsCopied uint64 + // count sum of rows copied in all shards, that should be the total number of rows inserted to the table + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + rowsCopied := row.AsUint64("rows_copied", 0) + totalRowsCopied += rowsCopied + } + require.Equal(t, uint64(len(insertStatements)), totalRowsCopied) + + // See that we're able to read logs after successful migration: + expectedMessage := "starting gh-ost" + logs := onlineddl.ReadMigrationLogs(t, &vtParams, uuid) + assert.Equal(t, len(shards), len(logs)) + for i := range logs { + require.Contains(t, logs[i], expectedMessage) + } + }) t.Run("successful online alter, vtctl", func(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost", "vtctl", "ghost_col") @@ -283,15 +319,48 @@ func TestSchemaChange(t *testing.T) { onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, true) }) + t.Run("Online CREATE no PK table, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, noPKCreateTableStatement, "gh-ost --skip-topo", "vtgate", "online_ddl_create_col") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + t.Run("Fail ALTER for no PK table, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost --skip-topo", "vtgate", "") + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed) 
+ + expectedMessage := "No PRIMARY nor UNIQUE key found" + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + message := row["message"].ToString() + // the following message is generated by gh-ost. We test that it is captured in our 'message' column: + require.Contains(t, message, expectedMessage) + } + + // See that we're able to read logs after failed migration: + logs := onlineddl.ReadMigrationLogs(t, &vtParams, uuid) + assert.Equal(t, len(shards), len(logs)) + for i := range logs { + require.Contains(t, logs[i], expectedMessage) + } + }) } func testWithInitialSchema(t *testing.T) { - // Create 4 tables + // Create 4 tables and populate them var sqlQuery = "" //nolint for i := 0; i < totalTableCount; i++ { - sqlQuery = fmt.Sprintf(createTable, fmt.Sprintf("vt_onlineddl_test_%02d", i)) + tableName := fmt.Sprintf("vt_onlineddl_test_%02d", i) + sqlQuery = fmt.Sprintf(createTable, tableName) err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, sqlQuery) require.Nil(t, err) + + for _, insert := range insertStatements { + insertQuery := fmt.Sprintf(insert, tableName) + r := onlineddl.VtgateExecQuery(t, &vtParams, insertQuery, "") + require.NotNil(t, r) + } } // Check if 4 tables are created @@ -299,7 +368,7 @@ func testWithInitialSchema(t *testing.T) { } // testOnlineDDLStatement runs an online DDL, ALTER statement -func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectColumn string) (uuid string) { +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string) (uuid string) { tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) sqlQuery := fmt.Sprintf(alterStatement, tableName) if executeStrategy == "vtgate" { @@ -309,21 +378,21 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = 
clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, ddlStrategy) + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) fmt.Println("# Generated UUID (for debug purposes):") fmt.Printf("<%s>\n", uuid) - strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) assert.NoError(t, err) - if !strategy.IsDirect() { + if !strategySetting.Strategy.IsDirect() { time.Sleep(time.Second * 20) } - if expectColumn != "" { - checkMigratedTable(t, tableName, expectColumn) + if expectHint != "" { + checkMigratedTable(t, tableName, expectHint) } return uuid } diff --git a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go index d5c0ddaa98c..7c6046ccca5 100644 --- a/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go +++ b/go/test/endtoend/onlineddl/revert/onlineddl_revert_test.go @@ -158,7 +158,9 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "-schema_change_dir", schemaChangeDirectory, "-schema_change_controller", "local", - "-schema_change_check_interval", "1"} + "-schema_change_check_interval", "1", + "-online_ddl_check_interval", "3s", + } clusterInstance.VtTabletExtraArgs = []string{ "-enable-lag-throttler", @@ -469,17 +471,17 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, ddlStrategy) + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) fmt.Println("# Generated UUID (for debug purposes):") fmt.Printf("<%s>\n", uuid) - 
strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) assert.NoError(t, err) - if !strategy.IsDirect() { + if !strategySetting.Strategy.IsDirect() { time.Sleep(time.Second * 20) } diff --git a/go/test/endtoend/onlineddl/singleton/onlineddl_singleton_test.go b/go/test/endtoend/onlineddl/singleton/onlineddl_singleton_test.go new file mode 100644 index 00000000000..84a0ed91f38 --- /dev/null +++ b/go/test/endtoend/onlineddl/singleton/onlineddl_singleton_test.go @@ -0,0 +1,396 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package singleton + +import ( + "flag" + "fmt" + "os" + "path" + "strings" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/vt/schema" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + tableName = `stress_test` + onlineSingletonDDLStrategy = "online -singleton" + onlineSingletonContextDDLStrategy = "online -singleton-context" + createStatement = ` + CREATE TABLE stress_test ( + id bigint(20) not null, + rand_val varchar(32) null default '', + hint_col varchar(64) not null default 'just-created', + created_timestamp timestamp not null default current_timestamp, + updates int unsigned not null default 0, + PRIMARY KEY (id), + key created_idx(created_timestamp), + key updates_idx(updates) + ) ENGINE=InnoDB + ` + // We will run this query with "gh-ost --max-load=Threads_running=1" + alterTableThrottlingStatement = ` + ALTER TABLE stress_test DROP COLUMN created_timestamp + ` + multiAlterTableThrottlingStatement = ` + ALTER TABLE stress_test ENGINE=InnoDB; + ALTER TABLE stress_test ENGINE=InnoDB; + ALTER TABLE stress_test ENGINE=InnoDB; + ` + // A trivial statement which must succeed and does not change the schema + alterTableTrivialStatement = ` + ALTER TABLE stress_test ENGINE=InnoDB + ` + dropStatement = ` + DROP TABLE stress_test + ` + multiDropStatements = `DROP TABLE IF EXISTS t1; DROP TABLE IF EXISTS t2; DROP TABLE IF EXISTS t3;` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", 
clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "-schema_change_dir", schemaChangeDirectory, + "-schema_change_controller", "local", + "-schema_change_check_interval", "1"} + + clusterInstance.VtTabletExtraArgs = []string{ + "-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + } + clusterInstance.VtGateExtraArgs = []string{} + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + + // No need for replicas in this stress test + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func TestSchemaChange(t *testing.T) { + defer cluster.PanicHandler(t) + shards := clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + + var uuids []string + // CREATE + t.Run("CREATE TABLE", func(t *testing.T) { + // The table does not exist + uuid := testOnlineDDLStatement(t, createStatement, onlineSingletonDDLStrategy, "vtgate", "", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, 
schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + t.Run("revert CREATE TABLE", func(t *testing.T) { + // The table existed, so it will now be dropped (renamed) + uuid := testRevertMigration(t, uuids[len(uuids)-1], "vtgate", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert revert CREATE TABLE", func(t *testing.T) { + // Table was dropped (renamed) so it will now be restored + uuid := testRevertMigration(t, uuids[len(uuids)-1], "vtgate", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + + var throttledUUID string + t.Run("throttled migration", func(t *testing.T) { + throttledUUID = testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost -singleton --max-load=Threads_running=1", "vtgate", "hint_col", "", false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning) + }) + t.Run("failed singleton migration, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost -singleton --max-load=Threads_running=1", "vtgate", "hint_col", "rejected", true) + assert.Empty(t, uuid) + }) + t.Run("failed singleton migration, vtctl", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, alterTableThrottlingStatement, "gh-ost -singleton --max-load=Threads_running=1", "vtctl", "hint_col", "rejected", true) + assert.Empty(t, uuid) + }) + t.Run("failed revert migration", func(t *testing.T) { + uuid := testRevertMigration(t, throttledUUID, "vtgate", "rejected", true) + assert.Empty(t, uuid) + }) + t.Run("terminate throttled migration", func(t *testing.T) { + onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusRunning) + onlineddl.CheckCancelMigration(t, &vtParams, 
shards, throttledUUID, true) + time.Sleep(2 * time.Second) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, throttledUUID, schema.OnlineDDLStatusFailed) + }) + t.Run("successful gh-ost alter, vtctl", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost -singleton", "vtctl", "hint_col", "", false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + t.Run("successful gh-ost alter, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost -singleton", "vtgate", "hint_col", "", false) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + }) + + t.Run("successful online alter, vtgate", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, onlineSingletonDDLStrategy, "vtgate", "hint_col", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) + onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) + checkTable(t, tableName, true) + }) + t.Run("revert ALTER TABLE, vttablet", func(t *testing.T) { + // The table existed, so it will now be dropped (renamed) + uuid := testRevertMigration(t, uuids[len(uuids)-1], "vttablet", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + + var throttledUUIDs []string + // singleton-context + t.Run("throttled migrations, singleton-context", func(t *testing.T) { + uuidList := 
testOnlineDDLStatement(t, multiAlterTableThrottlingStatement, "gh-ost -singleton-context --max-load=Threads_running=1", "vtctl", "hint_col", "", false) + throttledUUIDs = strings.Split(uuidList, "\n") + assert.Equal(t, 3, len(throttledUUIDs)) + for _, uuid := range throttledUUIDs { + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning, schema.OnlineDDLStatusQueued) + } + }) + t.Run("failed migrations, singleton-context", func(t *testing.T) { + _ = testOnlineDDLStatement(t, multiAlterTableThrottlingStatement, "gh-ost -singleton-context --max-load=Threads_running=1", "vtctl", "hint_col", "rejected", false) + }) + t.Run("terminate throttled migrations", func(t *testing.T) { + for _, uuid := range throttledUUIDs { + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusRunning, schema.OnlineDDLStatusQueued) + onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, true) + } + time.Sleep(2 * time.Second) + for _, uuid := range throttledUUIDs { + uuid = strings.TrimSpace(uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusFailed, schema.OnlineDDLStatusCancelled) + } + }) + + t.Run("successful multiple statement, singleton-context, vtctl", func(t *testing.T) { + uuidList := testOnlineDDLStatement(t, multiDropStatements, onlineSingletonContextDDLStrategy, "vtctl", "", "", false) + uuidSlice := strings.Split(uuidList, "\n") + assert.Equal(t, 3, len(uuidSlice)) + for _, uuid := range uuidSlice { + uuid = strings.TrimSpace(uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + } + }) + + //DROP + + t.Run("online DROP TABLE", func(t *testing.T) { + uuid := testOnlineDDLStatement(t, dropStatement, onlineSingletonDDLStrategy, "vtgate", "", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, false) + }) + t.Run("revert DROP 
TABLE", func(t *testing.T) { + // This will recreate the table (well, actually, rename it back into place) + uuid := testRevertMigration(t, uuids[len(uuids)-1], "vttablet", "", false) + uuids = append(uuids, uuid) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) + checkTable(t, tableName, true) + }) + + // Last two tests (we run an incomplete migration) + t.Run("submit successful migration, no wait, vtgate", func(t *testing.T) { + _ = testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost -singleton", "vtgate", "hint_col", "", true) + }) + t.Run("fail submit migration, no wait, vtgate", func(t *testing.T) { + _ = testOnlineDDLStatement(t, alterTableTrivialStatement, "gh-ost -singleton", "vtgate", "hint_col", "rejected", true) + }) +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string, expectError string, skipWait bool) (uuid string) { + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) + require.NoError(t, err) + + if executeStrategy == "vtgate" { + result := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, expectError) + if result != nil { + row := result.Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } + } + } else { + output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy, SkipPreflight: true}) + if expectError == "" { + assert.NoError(t, err) + uuid = output + } else { + assert.Error(t, err) + assert.Contains(t, output, expectError) + } + } + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + + if !strategySetting.Strategy.IsDirect() && !skipWait { + time.Sleep(time.Second * 20) + } + + if expectError == "" && expectHint != "" { + checkMigratedTable(t, 
tableName, expectHint) + } + return uuid +} + +// testRevertMigration reverts a given migration +func testRevertMigration(t *testing.T, revertUUID string, executeStrategy string, expectError string, skipWait bool) (uuid string) { + revertQuery := fmt.Sprintf("revert vitess_migration '%s'", revertUUID) + if executeStrategy == "vtgate" { + result := onlineddl.VtgateExecDDL(t, &vtParams, onlineSingletonDDLStrategy, revertQuery, expectError) + if result != nil { + row := result.Named().Row() + if row != nil { + uuid = row.AsString("uuid", "") + } + } + } else { + output, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, revertQuery, cluster.VtctlClientParams{DDLStrategy: onlineSingletonDDLStrategy, SkipPreflight: true}) + if expectError == "" { + assert.NoError(t, err) + uuid = output + } else { + assert.Error(t, err) + assert.Contains(t, output, expectError) + } + } + + if expectError == "" { + uuid = strings.TrimSpace(uuid) + fmt.Println("# Generated UUID (for debug purposes):") + fmt.Printf("<%s>\n", uuid) + } + if !skipWait { + time.Sleep(time.Second * 20) + } + return uuid +} + +// checkTable checks the number of tables in the first two shards. 
+func checkTable(t *testing.T, showTableName string, expectExists bool) bool { + expectCount := 0 + if expectExists { + expectCount = 1 + } + for i := range clusterInstance.Keyspaces[0].Shards { + if !checkTablesCount(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], showTableName, expectCount) { + return false + } + } + return true +} + +// checkTablesCount checks the number of tables in the given tablet +func checkTablesCount(t *testing.T, tablet *cluster.Vttablet, showTableName string, expectCount int) bool { + query := fmt.Sprintf(`show tables like '%%%s%%';`, showTableName) + queryResult, err := tablet.VttabletProcess.QueryTablet(query, keyspaceName, true) + require.Nil(t, err) + return assert.Equal(t, expectCount, len(queryResult.Rows)) +} + +// checkMigratedTables checks the CREATE STATEMENT of a table after migration +func checkMigratedTable(t *testing.T, tableName, expectHint string) { + for i := range clusterInstance.Keyspaces[0].Shards { + createStatement := getCreateTableStatement(t, clusterInstance.Keyspaces[0].Shards[i].Vttablets[0], tableName) + assert.Contains(t, createStatement, expectHint) + } +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName string) (statement string) { + queryResult, err := tablet.VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s;", tableName), keyspaceName, true) + require.Nil(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} diff --git a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go index 7971a128b75..f0b7f0c1979 100644 --- a/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go +++ b/go/test/endtoend/onlineddl/vrepl/onlineddl_vrepl_test.go @@ -163,7 +163,9 @@ func 
TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "-schema_change_dir", schemaChangeDirectory, "-schema_change_controller", "local", - "-schema_change_check_interval", "1"} + "-online_ddl_check_interval", "3s", + "-schema_change_check_interval", "1", + } clusterInstance.VtTabletExtraArgs = []string{ "-enable-lag-throttler", @@ -248,6 +250,7 @@ func TestSchemaChange(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableSuccessfulStatement, "online", "vtgate", "vrepl_col") onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) testRows(t) + testMigrationRowCount(t, uuid) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) @@ -256,6 +259,7 @@ func TestSchemaChange(t *testing.T) { uuid := testOnlineDDLStatement(t, alterTableTrivialStatement, "online", "vtctl", "vrepl_col") onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) testRows(t) + testMigrationRowCount(t, uuid) onlineddl.CheckCancelMigration(t, &vtParams, shards, uuid, false) onlineddl.CheckRetryMigration(t, &vtParams, shards, uuid, false) }) @@ -370,6 +374,21 @@ func testRows(t *testing.T) { require.Equal(t, countInserts, row.AsInt64("c", 0)) } +func testMigrationRowCount(t *testing.T, uuid string) { + insertMutex.Lock() + defer insertMutex.Unlock() + + var totalRowsCopied uint64 + // count sum of rows copied in all shards, that should be the total number of rows inserted to the table + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + for _, row := range rs.Named().Rows { + rowsCopied := row.AsUint64("rows_copied", 0) + totalRowsCopied += rowsCopied + } + require.Equal(t, uint64(countInserts), totalRowsCopied) +} + func testWithInitialSchema(t *testing.T) { // Create 4 tables var sqlQuery = "" //nolint @@ -384,7 +403,7 @@ func testWithInitialSchema(t *testing.T) { } // testOnlineDDLStatement runs an 
online DDL, ALTER statement -func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectColumn string) (uuid string) { +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, executeStrategy string, expectHint string) (uuid string) { tableName := fmt.Sprintf("vt_onlineddl_test_%02d", 3) sqlQuery := fmt.Sprintf(alterStatement, tableName) if executeStrategy == "vtgate" { @@ -394,22 +413,22 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, ddlStrategy) + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) fmt.Println("# Generated UUID (for debug purposes):") fmt.Printf("<%s>\n", uuid) - strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) assert.NoError(t, err) - if !strategy.IsDirect() { + if !strategySetting.Strategy.IsDirect() { time.Sleep(time.Second * 20) } - if expectColumn != "" { - checkMigratedTable(t, tableName, expectColumn) + if expectHint != "" { + checkMigratedTable(t, tableName, expectHint) } return uuid } diff --git a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go index 1189619e2d4..45e422ccab4 100644 --- a/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go +++ b/go/test/endtoend/onlineddl/vrepl_stress/onlineddl_vrepl_mini_stress_test.go @@ -82,18 +82,30 @@ deletesAttempts=%d, deletesFailures=%d, deletesNoops=%d, deletes=%d, } var ( - clusterInstance *cluster.LocalProcessCluster - vtParams mysql.ConnParams + clusterInstance *cluster.LocalProcessCluster + vtParams 
mysql.ConnParams + evaluatedMysqlParams *mysql.ConnParams + opOrder int64 + opOrderMutex sync.Mutex + onlineDDLStrategy = "online -vreplication-test-suite -skip-topo" hostname = "localhost" keyspaceName = "ks" + shards []cluster.Shard cell = "zone1" schemaChangeDirectory = "" tableName = `stress_test` - createStatement = ` + afterTableName = `stress_test_after` + cleanupStatements = []string{ + `DROP TABLE IF EXISTS stress_test`, + `DROP TABLE IF EXISTS stress_test_before`, + `DROP TABLE IF EXISTS stress_test_after`, + } + createStatement = ` CREATE TABLE stress_test ( id bigint(20) not null, rand_val varchar(32) null default '', + op_order bigint unsigned not null default 0, hint_col varchar(64) not null default '', created_timestamp timestamp not null default current_timestamp, updates int unsigned not null default 0, @@ -106,10 +118,10 @@ var ( ALTER TABLE stress_test modify hint_col varchar(64) not null default '%s' ` insertRowStatement = ` - INSERT IGNORE INTO stress_test (id, rand_val) VALUES (%d, left(md5(rand()), 8)) + INSERT IGNORE INTO stress_test (id, rand_val, op_order) VALUES (%d, left(md5(rand()), 8), %d) ` updateRowStatement = ` - UPDATE stress_test SET updates=updates+1 WHERE id=%d + UPDATE stress_test SET op_order=%d, updates=updates+1 WHERE id=%d ` deleteRowStatement = ` DELETE FROM stress_test WHERE id=%d AND updates=1 @@ -118,6 +130,28 @@ var ( selectCountRowsStatement = ` SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test ` + // We use CAST(SUM(updates) AS SIGNED) because SUM() returns a DECIMAL datatype, and we want to read a SIGNED INTEGER type + selectCountRowsFromAfterTableStatement = ` + SELECT COUNT(*) AS num_rows, CAST(SUM(updates) AS SIGNED) AS sum_updates FROM stress_test_after + ` + selectCountFromTableBefore = ` + SELECT count(*) as c FROM stress_test_before + ` + selectCountFromTableAfter = ` + SELECT count(*) as c FROM stress_test_after + ` + selectMaxOpOrderFromTableBefore = ` + SELECT 
MAX(op_order) as m FROM stress_test_before + ` + selectMaxOpOrderFromTableAfter = ` + SELECT MAX(op_order) as m FROM stress_test_after + ` + selectBeforeTable = ` + SELECT * FROM stress_test_before order by id + ` + selectAfterTable = ` + SELECT * FROM stress_test_after order by id + ` truncateStatement = ` TRUNCATE TABLE stress_test ` @@ -125,11 +159,41 @@ var ( ) const ( - maxTableRows = 4096 - maxConcurrency = 5 - countIterations = 5 + maxTableRows = 4096 + maxConcurrency = 20 + singleConnectionSleepInterval = 2 * time.Millisecond + countIterations = 5 ) +func resetOpOrder() { + opOrderMutex.Lock() + defer opOrderMutex.Unlock() + opOrder = 0 +} + +func nextOpOrder() int64 { + opOrderMutex.Lock() + defer opOrderMutex.Unlock() + opOrder++ + return opOrder +} + +func getTablet() *cluster.Vttablet { + return clusterInstance.Keyspaces[0].Shards[0].Vttablets[0] +} + +func mysqlParams() *mysql.ConnParams { + if evaluatedMysqlParams != nil { + return evaluatedMysqlParams + } + evaluatedMysqlParams = &mysql.ConnParams{ + Uname: "vt_dba", + UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", getTablet().TabletUID), "/mysql.sock"), + DbName: fmt.Sprintf("vt_%s", keyspaceName), + } + return evaluatedMysqlParams +} + func TestMain(m *testing.M) { defer cluster.PanicHandler(nil) flag.Parse() @@ -147,7 +211,9 @@ func TestMain(m *testing.M) { clusterInstance.VtctldExtraArgs = []string{ "-schema_change_dir", schemaChangeDirectory, "-schema_change_controller", "local", - "-schema_change_check_interval", "1"} + "-schema_change_check_interval", "1", + "-online_ddl_check_interval", "3s", + } clusterInstance.VtTabletExtraArgs = []string{ "-enable-lag-throttler", @@ -202,7 +268,7 @@ func TestMain(m *testing.M) { func TestSchemaChange(t *testing.T) { defer cluster.PanicHandler(t) - shards := clusterInstance.Keyspaces[0].Shards + shards = clusterInstance.Keyspaces[0].Shards require.Equal(t, 1, len(shards)) t.Run("create schema", func(t *testing.T) { @@ -241,11 
+307,12 @@ func TestSchemaChange(t *testing.T) { t.Run("ALTER TABLE without workload", func(t *testing.T) { // A single ALTER TABLE. Generally this is covered in endtoend/onlineddl_vrepl, // but we wish to verify the ALTER statement used in these tests is sound + testWithInitialSchema(t) initTable(t) hint := "hint-alter-without-workload" - uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) + uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), onlineDDLStrategy, "vtgate", hint) onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - testSelectTableMetrics(t) + testSelectTableMetricsAfterMigration(t) }) for i := 0; i < countIterations; i++ { @@ -258,24 +325,37 @@ func TestSchemaChange(t *testing.T) { testName := fmt.Sprintf("ALTER TABLE with workload %d/%d", (i + 1), countIterations) t.Run(testName, func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - initTable(t) - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer wg.Done() - runMultipleConnections(ctx, t) - }() - hint := fmt.Sprintf("hint-alter-with-workload-%d", i) - uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), "online", "vtgate", hint) - onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, schema.OnlineDDLStatusComplete) - cancel() // will cause runMultipleConnections() to terminate - wg.Wait() - testSelectTableMetrics(t) + t.Run("create schema", func(t *testing.T) { + testWithInitialSchema(t) + }) + t.Run("init table", func(t *testing.T) { + initTable(t) + }) + t.Run("migrate", func(t *testing.T) { + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + runMultipleConnections(ctx, t) + }() + hint := fmt.Sprintf("hint-alter-with-workload-%d", i) + uuid := testOnlineDDLStatement(t, fmt.Sprintf(alterHintStatement, hint), onlineDDLStrategy, "vtgate", hint) + onlineddl.CheckMigrationStatus(t, &vtParams, shards, uuid, 
schema.OnlineDDLStatusComplete) + cancel() // will cause runMultipleConnections() to terminate + wg.Wait() + }) + t.Run("validate metrics", func(t *testing.T) { + testSelectTableMetricsAfterMigration(t) + }) }) } } func testWithInitialSchema(t *testing.T) { + for _, statement := range cleanupStatements { + err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, statement) + require.Nil(t, err) + } // Create the stress table err := clusterInstance.VtctlclientProcess.ApplySchema(keyspaceName, createStatement) require.Nil(t, err) @@ -293,22 +373,23 @@ func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy str } } else { var err error - uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, ddlStrategy) + uuid, err = clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, alterStatement, cluster.VtctlClientParams{DDLStrategy: ddlStrategy}) assert.NoError(t, err) } uuid = strings.TrimSpace(uuid) fmt.Println("# Generated UUID (for debug purposes):") fmt.Printf("<%s>\n", uuid) - strategy, _, err := schema.ParseDDLStrategy(ddlStrategy) + strategySetting, err := schema.ParseDDLStrategy(ddlStrategy) assert.NoError(t, err) - if !strategy.IsDirect() { - time.Sleep(time.Second * 20) + if !strategySetting.Strategy.IsDirect() { + status := onlineddl.WaitForMigrationStatus(t, &vtParams, shards, uuid, 30*time.Second, schema.OnlineDDLStatusComplete, schema.OnlineDDLStatusFailed) + fmt.Printf("# Migration status (for debug purposes): <%s>\n", status) } if expectHint != "" { - checkMigratedTable(t, tableName, expectHint) + checkMigratedTable(t, afterTableName, expectHint) } return uuid } @@ -349,7 +430,7 @@ func getCreateTableStatement(t *testing.T, tablet *cluster.Vttablet, tableName s func generateInsert(t *testing.T, conn *mysql.Conn) error { id := rand.Int31n(int32(maxTableRows)) - query := fmt.Sprintf(insertRowStatement, id) + query := fmt.Sprintf(insertRowStatement, id, nextOpOrder()) 
qr, err := conn.ExecuteFetch(query, 1000, true) func() { @@ -373,7 +454,7 @@ func generateInsert(t *testing.T, conn *mysql.Conn) error { func generateUpdate(t *testing.T, conn *mysql.Conn) error { id := rand.Int31n(int32(maxTableRows)) - query := fmt.Sprintf(updateRowStatement, id) + query := fmt.Sprintf(updateRowStatement, nextOpOrder(), id) qr, err := conn.ExecuteFetch(query, 1000, true) func() { @@ -446,10 +527,13 @@ func runSingleConnection(ctx context.Context, t *testing.T, done *int64) { if err != nil { if strings.Contains(err.Error(), "disallowed due to rule: enforce blacklisted tables") { err = nil + } else if strings.Contains(err.Error(), "doesn't exist") { + // Table renamed to _before, due to -vreplication-test-suite flag + err = nil } } assert.Nil(t, err) - time.Sleep(10 * time.Millisecond) + time.Sleep(singleConnectionSleepInterval) } } @@ -475,11 +559,20 @@ func initTable(t *testing.T) { log.Infof("initTable begin") defer log.Infof("initTable complete") + t.Run("cancel pending migrations", func(t *testing.T) { + cancelQuery := "alter vitess_migration cancel all" + r := onlineddl.VtgateExecQuery(t, &vtParams, cancelQuery, "") + if r.RowsAffected > 0 { + fmt.Printf("# Cancelled migrations (for debug purposes): %d\n", r.RowsAffected) + } + }) + ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() + resetOpOrder() writeMetrics.Clear() _, err = conn.ExecuteFetch(truncateStatement, 1000, true) require.Nil(t, err) @@ -495,10 +588,7 @@ func initTable(t *testing.T) { } } -func testSelectTableMetrics(t *testing.T) { - writeMetrics.mu.Lock() - defer writeMetrics.mu.Unlock() - +func testSelectTableMetricsWithStatement(t *testing.T, statement string) { log.Infof("%s", writeMetrics.String()) ctx := context.Background() @@ -506,7 +596,7 @@ func testSelectTableMetrics(t *testing.T) { require.Nil(t, err) defer conn.Close() - rs, err := conn.ExecuteFetch(selectCountRowsStatement, 1000, true) + rs, err := 
conn.ExecuteFetch(statement, 1000, true) require.Nil(t, err) row := rs.Named().Row() @@ -521,5 +611,76 @@ func testSelectTableMetrics(t *testing.T) { assert.NotZero(t, writeMetrics.deletes) assert.NotZero(t, writeMetrics.updates) assert.Equal(t, writeMetrics.inserts-writeMetrics.deletes, numRows) - assert.Equal(t, writeMetrics.updates-writeMetrics.deletes, sumUpdates) // because we DELETE WHERE updates=1 +} + +func testSelectTableMetrics(t *testing.T) { + testSelectTableMetricsWithStatement(t, selectCountRowsStatement) +} + +func testSelectTableMetricsAfterMigration(t *testing.T) { + writeMetrics.mu.Lock() + defer writeMetrics.mu.Unlock() + + var countBefore int64 + { + // Validate before table is populated + rs := onlineddl.VtgateExecQuery(t, &vtParams, selectCountFromTableBefore, "") + row := rs.Named().Row() + require.NotNil(t, row) + + countBefore = row.AsInt64("c", 0) + require.NotZero(t, countBefore) + require.Less(t, countBefore, int64(maxTableRows)) + + fmt.Printf("# count rows in table (before): %d\n", countBefore) + } + var countAfter int64 + { + // Validate after table is populated + rs := onlineddl.VtgateExecQuery(t, &vtParams, selectCountFromTableAfter, "") + row := rs.Named().Row() + require.NotNil(t, row) + + countAfter = row.AsInt64("c", 0) + require.NotZero(t, countAfter) + require.Less(t, countAfter, int64(maxTableRows)) + + fmt.Printf("# count rows in table (after): %d\n", countAfter) + } + { + rs := onlineddl.VtgateExecQuery(t, &vtParams, selectMaxOpOrderFromTableBefore, "") + row := rs.Named().Row() + require.NotNil(t, row) + + maxOpOrder := row.AsInt64("m", 0) + fmt.Printf("# max op_order in table (before): %d\n", maxOpOrder) + } + { + rs := onlineddl.VtgateExecQuery(t, &vtParams, selectMaxOpOrderFromTableAfter, "") + row := rs.Named().Row() + require.NotNil(t, row) + + maxOpOrder := row.AsInt64("m", 0) + fmt.Printf("# max op_order in table (after): %d\n", maxOpOrder) + } + + testSelectTableMetricsWithStatement(t,
selectCountRowsFromAfterTableStatement) + + { + selectBeforeFile := onlineddl.CreateTempScript(t, selectBeforeTable) + defer os.Remove(selectBeforeFile) + beforeOutput := onlineddl.MysqlClientExecFile(t, mysqlParams(), os.TempDir(), "", selectBeforeFile) + beforeOutput = strings.TrimSpace(beforeOutput) + require.NotEmpty(t, beforeOutput) + assert.Equal(t, countBefore, int64(len(strings.Split(beforeOutput, "\n")))) + + selectAfterFile := onlineddl.CreateTempScript(t, selectAfterTable) + defer os.Remove(selectAfterFile) + afterOutput := onlineddl.MysqlClientExecFile(t, mysqlParams(), os.TempDir(), "", selectAfterFile) + afterOutput = strings.TrimSpace(afterOutput) + require.NotEmpty(t, afterOutput) + assert.Equal(t, countAfter, int64(len(strings.Split(afterOutput, "\n")))) + + require.Equal(t, beforeOutput, afterOutput, "results mismatch: (%s) and (%s)", selectBeforeTable, selectAfterTable) + } } diff --git a/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go new file mode 100644 index 00000000000..a170be6bf4d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/onlineddl_vrepl_suite_test.go @@ -0,0 +1,413 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplsuite + +import ( + "context" + "flag" + "fmt" + "io/ioutil" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "strings" + "testing" + "time" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" + + "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/endtoend/onlineddl" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + evaluatedMysqlParams *mysql.ConnParams + ddlStrategy = "online -skip-topo -vreplication-test-suite" + waitForMigrationTimeout = 20 * time.Second + + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + schemaChangeDirectory = "" + tableName = `onlineddl_test` + beforeTableName = `onlineddl_test_before` + afterTableName = `onlineddl_test_after` + eventName = `onlineddl_test` +) + +const ( + testDataPath = "testdata" + defaultSQLMode = "ONLY_FULL_GROUP_BY,STRICT_TRANS_TABLES,NO_ZERO_IN_DATE,NO_ZERO_DATE,ERROR_FOR_DIVISION_BY_ZERO,NO_ENGINE_SUBSTITUTION" +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode, err := func() (int, error) { + clusterInstance = cluster.NewCluster(cell, hostname) + schemaChangeDirectory = path.Join("/tmp", fmt.Sprintf("schema_change_dir_%d", clusterInstance.GetAndReserveTabletUID())) + defer os.RemoveAll(schemaChangeDirectory) + defer clusterInstance.Teardown() + + if _, err := os.Stat(schemaChangeDirectory); os.IsNotExist(err) { + _ = os.Mkdir(schemaChangeDirectory, 0700) + } + + clusterInstance.VtctldExtraArgs = []string{ + "-schema_change_dir", schemaChangeDirectory, + "-schema_change_controller", "local", + "-schema_change_check_interval", "1", + "-online_ddl_check_interval", "2s", + } + + clusterInstance.VtTabletExtraArgs = []string{ + "-enable-lag-throttler", + "-throttle_threshold", "1s", + "-heartbeat_enable", + 
"-heartbeat_interval", "250ms", + "-migration_check_interval", "5s", + } + + if err := clusterInstance.StartTopo(); err != nil { + return 1, err + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + } + + // No need for replicas in this stress test + if err := clusterInstance.StartKeyspace(*keyspace, []string{"1"}, 0, false); err != nil { + return 1, err + } + + vtgateInstance := clusterInstance.NewVtgateInstance() + // set the gateway we want to use + vtgateInstance.GatewayImplementation = "tabletgateway" + // Start vtgate + if err := vtgateInstance.Setup(); err != nil { + return 1, err + } + // ensure it is torn down during cluster TearDown + clusterInstance.VtgateProcess = *vtgateInstance + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + + return m.Run(), nil + }() + if err != nil { + fmt.Printf("%v\n", err) + os.Exit(1) + } else { + os.Exit(exitcode) + } + +} + +func TestSchemaChange(t *testing.T) { + defer cluster.PanicHandler(t) + + shards := clusterInstance.Keyspaces[0].Shards + require.Equal(t, 1, len(shards)) + + files, err := ioutil.ReadDir(testDataPath) + require.NoError(t, err) + for _, f := range files { + if !f.IsDir() { + continue + } + // this is a test! + t.Run(f.Name(), func(t *testing.T) { + testSingle(t, f.Name()) + }) + } +} + +func readTestFile(t *testing.T, testName string, fileName string) (content string, exists bool) { + filePath := path.Join(testDataPath, testName, fileName) + _, err := os.Stat(filePath) + if os.IsNotExist(err) { + return "", false + } + require.NoError(t, err) + b, err := ioutil.ReadFile(filePath) + require.NoError(t, err) + return strings.TrimSpace(string(b)), true +} + +// testSingle is the main testing function for a single test in the suite. +// It prepares the grounds, creates the test data, runs a migration, expects results/error, cleans up. 
+func testSingle(t *testing.T, testName string) { + if ignoreVersions, exists := readTestFile(t, testName, "ignore_versions"); exists { + // ignoreVersions is a regexp + re, err := regexp.Compile(ignoreVersions) + require.NoError(t, err) + + rs := mysqlExec(t, "select @@version as ver", "") + row := rs.Named().Row() + require.NotNil(t, row) + mysqlVersion := row["ver"].ToString() + + if re.MatchString(mysqlVersion) { + t.Skipf("Skipping test due to ignore_versions=%s", ignoreVersions) + return + } + } + + sqlMode := defaultSQLMode + if overrideSQLMode, exists := readTestFile(t, testName, "sql_mode"); exists { + sqlMode = overrideSQLMode + } + sqlModeQuery := fmt.Sprintf("set @@global.sql_mode='%s'", sqlMode) + _ = mysqlExec(t, sqlModeQuery, "") + _ = mysqlExec(t, "set @@global.event_scheduler=1", "") + + _ = mysqlExec(t, fmt.Sprintf("drop table if exists %s_child, %s, %s_parent, %s, %s;", tableName, tableName, tableName, beforeTableName, afterTableName), "") + _ = mysqlExec(t, fmt.Sprintf("drop event if exists %s", eventName), "") + + { + // create + f := "create.sql" + _, exists := readTestFile(t, testName, f) + require.True(t, exists) + mysqlClientExecFile(t, testName, f) + // ensure test table has been created: + getCreateTableStatement(t, tableName) + } + defer func() { + // destroy + f := "destroy.sql" + if _, exists := readTestFile(t, testName, f); exists { + mysqlClientExecFile(t, testName, f) + } + }() + + var expectQueryFailure string + if content, exists := readTestFile(t, testName, "expect_query_failure"); exists { + // VTGate failure is expected! + expectQueryFailure = content + } + + var migrationMessage string + var migrationStatus string + // Run test + alterClause := "engine=innodb" + if content, exists := readTestFile(t, testName, "alter"); exists { + alterClause = content + } + alterStatement := fmt.Sprintf("alter table %s %s", tableName, alterClause) + // Run the DDL! 
+ uuid := testOnlineDDLStatement(t, alterStatement, ddlStrategy, expectQueryFailure) + + if expectQueryFailure != "" { + // Nothing further to do. Migration isn't actually running + return + } + assert.NotEmpty(t, uuid) + + defer func() { + query, err := sqlparser.ParseAndBind("alter vitess_migration %a cancel", + sqltypes.StringBindVariable(uuid), + ) + require.NoError(t, err) + onlineddl.VtgateExecQuery(t, &vtParams, query, "") + }() + row := waitForMigration(t, uuid, waitForMigrationTimeout) + // migration is complete + { + migrationStatus = row["migration_status"].ToString() + migrationMessage = row["message"].ToString() + } + + if expectedErrorMessage, exists := readTestFile(t, testName, "expect_failure"); exists { + // Failure is expected! + assert.Equal(t, migrationStatus, string(schema.OnlineDDLStatusFailed)) + require.Contains(t, migrationMessage, expectedErrorMessage, "expected error message (%s) to contain (%s)", migrationMessage, expectedErrorMessage) + // no need to proceed to checksum or anything further + return + } + // We do not expect failure. 
+ require.Equal(t, string(schema.OnlineDDLStatusComplete), migrationStatus) + + if content, exists := readTestFile(t, testName, "expect_table_structure"); exists { + createStatement := getCreateTableStatement(t, afterTableName) + assert.Contains(t, createStatement, content, "expected SHOW CREATE TABLE to contain text in 'expect_table_structure' file") + } + + { + // checksum + beforeColumns := "*" + if content, exists := readTestFile(t, testName, "before_columns"); exists { + beforeColumns = content + } + afterColumns := "*" + if content, exists := readTestFile(t, testName, "after_columns"); exists { + afterColumns = content + } + orderBy := "" + if content, exists := readTestFile(t, testName, "order_by"); exists { + orderBy = fmt.Sprintf("order by %s", content) + } + selectBefore := fmt.Sprintf("select %s from %s %s", beforeColumns, beforeTableName, orderBy) + selectAfter := fmt.Sprintf("select %s from %s %s", afterColumns, afterTableName, orderBy) + + selectBeforeFile := createTempScript(t, selectBefore) + defer os.Remove(selectBeforeFile) + beforeOutput := mysqlClientExecFile(t, "", selectBeforeFile) + + selectAfterFile := createTempScript(t, selectAfter) + defer os.Remove(selectAfterFile) + afterOutput := mysqlClientExecFile(t, "", selectAfterFile) + + require.Equal(t, beforeOutput, afterOutput, "results mismatch: (%s) and (%s)", selectBefore, selectAfter) + } +} + +// testOnlineDDLStatement runs an online DDL, ALTER statement +func testOnlineDDLStatement(t *testing.T, alterStatement string, ddlStrategy string, expectError string) (uuid string) { + qr := onlineddl.VtgateExecDDL(t, &vtParams, ddlStrategy, alterStatement, expectError) + if qr != nil { + row := qr.Named().Row() + require.NotNil(t, row) + uuid = row.AsString("uuid", "") + } + uuid = strings.TrimSpace(uuid) + return uuid +} + +func readMigration(t *testing.T, uuid string) sqltypes.RowNamedValues { + rs := onlineddl.ReadMigrations(t, &vtParams, uuid) + require.NotNil(t, rs) + row := rs.Named().Row() 
+ require.NotNil(t, row) + return row +} + +func waitForMigration(t *testing.T, uuid string, timeout time.Duration) sqltypes.RowNamedValues { + var status string + sleepDuration := time.Second + for timeout > 0 { + row := readMigration(t, uuid) + status = row["migration_status"].ToString() + switch status { + case string(schema.OnlineDDLStatusComplete), string(schema.OnlineDDLStatusFailed): + // migration is complete, either successful or not + return row + } + time.Sleep(sleepDuration) + timeout = timeout - sleepDuration + } + require.NoError(t, fmt.Errorf("timeout in waitForMigration(%s). status is: %s", uuid, status)) + return nil +} + +func getTablet() *cluster.Vttablet { + return clusterInstance.Keyspaces[0].Shards[0].Vttablets[0] +} + +func mysqlParams() *mysql.ConnParams { + if evaluatedMysqlParams != nil { + return evaluatedMysqlParams + } + evaluatedMysqlParams = &mysql.ConnParams{ + Uname: "vt_dba", + UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d", getTablet().TabletUID), "/mysql.sock"), + DbName: fmt.Sprintf("vt_%s", keyspaceName), + } + return evaluatedMysqlParams +} + +// mysqlExec executes a SQL statement directly against the backend MySQL server, optionally validating an expected error +func mysqlExec(t *testing.T, sql string, expectError string) *sqltypes.Result { + t.Helper() + + ctx := context.Background() + conn, err := mysql.Connect(ctx, mysqlParams()) + require.Nil(t, err) + defer conn.Close() + + qr, err := conn.ExecuteFetch(sql, 100000, true) + if expectError == "" { + require.NoError(t, err) + } else { + require.Error(t, err, "error should not be nil") + require.Contains(t, err.Error(), expectError, "Unexpected error") + } + return qr +} + +// mysqlClientExecFile runs a file through the mysql client +func mysqlClientExecFile(t *testing.T, testName string, fileName string) (output string) { + t.Helper() + + bashPath, err := exec.LookPath("bash") + require.NoError(t, err) + mysqlPath, err := exec.LookPath("mysql") + require.NoError(t, err) + + filePath := fileName + if
!filepath.IsAbs(fileName) { + filePath, _ = filepath.Abs(path.Join(testDataPath, testName, fileName)) + } + params := mysqlParams() + bashCommand := fmt.Sprintf(`%s -u%s --socket=%s --database=%s -s -s < %s 2> /tmp/error.log`, mysqlPath, params.Uname, params.UnixSocket, params.DbName, filePath) + cmd, err := exec.Command( + bashPath, + "-c", + bashCommand, + ).Output() + + require.NoError(t, err) + return string(cmd) +} + +// getCreateTableStatement returns the CREATE TABLE statement for a given table +func getCreateTableStatement(t *testing.T, tableName string) (statement string) { + queryResult, err := getTablet().VttabletProcess.QueryTablet(fmt.Sprintf("show create table %s", tableName), keyspaceName, true) + require.Nil(t, err) + + assert.Equal(t, len(queryResult.Rows), 1) + assert.Equal(t, len(queryResult.Rows[0]), 2) // table name, create statement + statement = queryResult.Rows[0][1].ToString() + return statement +} + +func createTempScript(t *testing.T, content string) (fileName string) { + f, err := ioutil.TempFile("", "vrepl-suite-") + require.NoError(t, err) + + _, err = f.WriteString(content) + require.NoError(t, err) + err = f.Close() + require.NoError(t, err) + + return f.Name() +} diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/alter new file mode 100644 index 00000000000..b5ec82b1a8b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/alter @@ -0,0 +1 @@ +MODIFY `t1` varchar(128) CHARACTER SET utf8mb4 NOT NULL, MODIFY `t2` varchar(128) CHARACTER SET latin2 NOT NULL, MODIFY `tutf8` varchar(128) CHARACTER SET latin1 NOT NULL diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/create.sql new file mode 100644 index 00000000000..26bae0cc966 --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/create.sql @@ -0,0 +1,30 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t1 varchar(128) charset latin1 collate latin1_swedish_ci, + t2 varchar(128) charset latin1 collate latin1_swedish_ci, + tutf8 varchar(128) charset utf8, + tutf8mb4 varchar(128) charset utf8mb4, + tlatin1 varchar(128) charset latin1 collate latin1_swedish_ci, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand())); +insert into onlineddl_test values (null, 'átesting', 'átesting', 'átesting', 'átesting', 'átesting'); +insert into onlineddl_test values (null, 'testátest', 'testátest', 'testátest', '🍻😀', 'átesting'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand())); + insert into onlineddl_test values (null, 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog'); + insert into onlineddl_test values (null, 'testátest-binlog', 'testátest-binlog', 'testátest-binlog', '🍻😀', 'átesting-binlog'); + insert into onlineddl_test values (null, 'átesting-bnull', 'átesting-bnull', 'átesting-bnull', null, null); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/ignore_versions new file mode 100644 index 00000000000..0790a1e68fd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8-80/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6|5.7) diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/alter new file mode 100644 index 00000000000..b5ec82b1a8b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/alter @@ -0,0 +1 @@ +MODIFY `t1` varchar(128) CHARACTER SET utf8mb4 NOT NULL, MODIFY `t2` varchar(128) CHARACTER SET latin2 NOT NULL, MODIFY `tutf8` varchar(128) CHARACTER SET latin1 NOT NULL diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/create.sql new file mode 100644 index 00000000000..72df20e51ad --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t1 varchar(128) charset latin1 collate latin1_swedish_ci, + t2 varchar(128) charset latin1 collate latin1_swedish_ci, + tutf8 varchar(128) charset utf8, + tutf8mb4 varchar(128) charset utf8mb4, + tlatin1 varchar(128) charset latin1 collate latin1_swedish_ci, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand())); +insert into onlineddl_test values (null, 'átesting', 'átesting', 'átesting', 'átesting', 'átesting'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand()), md5(rand()), md5(rand())); + insert into onlineddl_test values (null, 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog', 'átesting-binlog'); + insert into onlineddl_test values (null, 'átesting-bnull', 
'átesting-bnull', 'átesting-bnull', null, null); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/alter-charset-non-utf8/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/alter new file mode 100644 index 00000000000..5e180af3b7b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/alter @@ -0,0 +1 @@ +AUTO_INCREMENT=7 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/create.sql new file mode 100644 index 00000000000..0a150c3f040 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/create.sql @@ -0,0 +1,17 @@ +drop event if exists onlineddl_test; + +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (NULL, 11); +insert into onlineddl_test values (NULL, 13); +insert into onlineddl_test values (NULL, 17); +insert into onlineddl_test values (NULL, 23); +insert into onlineddl_test values (NULL, 29); +insert into onlineddl_test values (NULL, 31); +insert into onlineddl_test values (NULL, 37); +delete from onlineddl_test where id>=5; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/expect_table_structure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/expect_table_structure new file mode 
100644 index 00000000000..5e180af3b7b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes-user-defined/expect_table_structure @@ -0,0 +1 @@ +AUTO_INCREMENT=7 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes/create.sql new file mode 100644 index 00000000000..0a150c3f040 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes/create.sql @@ -0,0 +1,17 @@ +drop event if exists onlineddl_test; + +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (NULL, 11); +insert into onlineddl_test values (NULL, 13); +insert into onlineddl_test values (NULL, 17); +insert into onlineddl_test values (NULL, 23); +insert into onlineddl_test values (NULL, 29); +insert into onlineddl_test values (NULL, 31); +insert into onlineddl_test values (NULL, 37); +delete from onlineddl_test where id>=5; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes/expect_table_structure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes/expect_table_structure new file mode 100644 index 00000000000..5a755ffb8b5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-deletes/expect_table_structure @@ -0,0 +1 @@ +AUTO_INCREMENT=8 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-simple/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-simple/create.sql new file mode 100644 index 00000000000..b2680e8ff4d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-simple/create.sql @@ -0,0 +1,13 @@ +drop event if exists onlineddl_test; + +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + primary 
key(id) +) auto_increment=1; + +insert into onlineddl_test values (NULL, 11); +insert into onlineddl_test values (NULL, 13); +insert into onlineddl_test values (NULL, 17); +insert into onlineddl_test values (NULL, 23); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-simple/expect_table_structure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-simple/expect_table_structure new file mode 100644 index 00000000000..3ed59021bb1 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-copy-simple/expect_table_structure @@ -0,0 +1 @@ +AUTO_INCREMENT=5 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-zero-value/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-zero-value/create.sql new file mode 100644 index 00000000000..8f712962b53 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/autoinc-zero-value/create.sql @@ -0,0 +1,9 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + primary key(id) +) auto_increment=1; + +set session sql_mode='NO_AUTO_VALUE_ON_ZERO'; +insert into onlineddl_test values (0, 23); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bigint-change-nullable/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bigint-change-nullable/alter new file mode 100644 index 00000000000..6b139a49f66 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bigint-change-nullable/alter @@ -0,0 +1 @@ +change val val bigint diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bigint-change-nullable/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bigint-change-nullable/create.sql new file mode 100644 index 00000000000..7814a54b3e8 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bigint-change-nullable/create.sql @@ -0,0 +1,21 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id bigint auto_increment, + val 
bigint not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 18446744073709551615); + insert into onlineddl_test values (null, 18446744073709551614); + insert into onlineddl_test values (null, 18446744073709551613); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/after_columns new file mode 100644 index 00000000000..b464f06ca85 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/after_columns @@ -0,0 +1 @@ +id, i diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/alter new file mode 100644 index 00000000000..d96488cd221 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/alter @@ -0,0 +1 @@ +add column is_good bit null default 0 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/before_columns new file mode 100644 index 00000000000..b464f06ca85 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/before_columns @@ -0,0 +1 @@ +id, i diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/create.sql new file mode 100644 index 00000000000..13e2ec218cd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-add/create.sql @@ -0,0 +1,20 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event 
onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11); + insert into onlineddl_test values (null, 13); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-dml/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-dml/alter new file mode 100644 index 00000000000..1cd11573e84 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-dml/alter @@ -0,0 +1 @@ +modify column is_good bit not null default 0 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-dml/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-dml/create.sql new file mode 100644 index 00000000000..88f68166711 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/bit-dml/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + is_good bit null default 0, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 0); + insert into onlineddl_test values (null, 13, 1); + insert into onlineddl_test values (null, 17, 1); + + update onlineddl_test set is_good=0 where i=13 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/char-collate-binary/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/char-collate-binary/alter new file mode 100644 index 00000000000..0768d2bb537 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/char-collate-binary/alter @@ -0,0 +1 @@ +change id id bigint diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/char-collate-binary/create.sql 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/char-collate-binary/create.sql new file mode 100644 index 00000000000..3120e0cfd49 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/char-collate-binary/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id bigint auto_increment, + country_code char(3) collate utf8mb4_bin, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 'ABC'); +insert into onlineddl_test values (null, 'DEF'); +insert into onlineddl_test values (null, 'GHI'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 'jkl'); + insert into onlineddl_test values (null, 'MNO'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/convert-utf8mb4/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/convert-utf8mb4/alter new file mode 100644 index 00000000000..21fa6e1e755 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/convert-utf8mb4/alter @@ -0,0 +1 @@ +convert to character set utf8mb4 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/convert-utf8mb4/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/convert-utf8mb4/create.sql new file mode 100644 index 00000000000..54963d7ff17 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/convert-utf8mb4/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t varchar(128) charset utf8 collate utf8_general_ci, + tl varchar(128) charset latin1 not null, + ta varchar(128) charset ascii not null, + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 'Hello world, Καλημέρα κόσμε, コンニチハ', 'átesting0', 'initial'); + +drop event if 
exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand()), 'átesting-a', 'a'); + insert into onlineddl_test values (null, 'novo proprietário', 'átesting-b', 'b'); + insert into onlineddl_test values (null, '2H₂ + O₂ ⇌ 2H₂O, R = 4.7 kΩ, ⌀ 200 mm', 'átesting-c', 'c'); + insert into onlineddl_test values (null, 'usuário', 'átesting-x', 'x'); + + delete from onlineddl_test where ta='x' order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/after_columns new file mode 100644 index 00000000000..581038fb5a3 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/after_columns @@ -0,0 +1 @@ +id, create_time, update_time, counter diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/alter new file mode 100644 index 00000000000..f4e4a9f2c06 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/alter @@ -0,0 +1 @@ +add column name varchar(1) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/before_columns new file mode 100644 index 00000000000..581038fb5a3 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/before_columns @@ -0,0 +1 @@ +id, create_time, update_time, counter diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/create.sql new file mode 100644 index 00000000000..fe92644781f --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/create.sql @@ -0,0 +1,27 @@ +set session time_zone='+00:00'; + +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + create_time timestamp NULL DEFAULT '0000-00-00 00:00:00', + update_time timestamp NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + counter int(10) unsigned DEFAULT NULL, + primary key(id) +) auto_increment=1; + +set session time_zone='+00:00'; +insert into onlineddl_test values (1, '0000-00-00 00:00:00', now(), 0); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + set session time_zone='+00:00'; + update onlineddl_test set counter = counter + 1 where id = 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/sql_mode b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-1970/sql_mode new file mode 100644 index 00000000000..e69de29bb2d diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis-zeroleading/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis-zeroleading/create.sql new file mode 100644 index 00000000000..7e41217856e --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis-zeroleading/create.sql @@ -0,0 +1,27 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + dt0 datetime(6), + dt1 datetime(6), + ts2 timestamp(6), + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into 
onlineddl_test values (null, 11, '2016-10-31 11:22:33.0123', now(), '2016-10-31 11:22:33.0369', 0); + update onlineddl_test set dt1='2016-10-31 11:22:33.0246', updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, '2016-10-31 11:22:33.0123', '2016-10-31 11:22:33.789', '2016-10-31 11:22:33.0369', 0); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis-zeroleading/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis-zeroleading/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis-zeroleading/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis/create.sql new file mode 100644 index 00000000000..12bef17defc --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + dt0 datetime(6), + dt1 datetime(6), + ts2 timestamp(6), + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + disable on slave + do +begin + insert into onlineddl_test values (null, 11, now(), now(), now(), 0); + update onlineddl_test set dt1='2016-10-31 11:22:33.444', updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, now(), now(), now(), 0); + update onlineddl_test set ts1='2016-11-01 11:22:33.444', updated = 1 where i = 13 order by id desc limit 1; +end ;; diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-submillis/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/alter new file mode 100644 index 00000000000..c1d80af44e2 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/alter @@ -0,0 +1 @@ +change column t t timestamp default current_timestamp diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/create.sql new file mode 100644 index 00000000000..52a8a0cb780 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/create.sql @@ -0,0 +1,31 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int unsigned auto_increment, + i int not null, + ts0 timestamp default current_timestamp, + ts1 timestamp null, + dt2 datetime, + t datetime default current_timestamp, + updated tinyint unsigned default 0, + primary key(id, t), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 7, null, now(), now(), '2010-10-20 10:20:30', 0); + + insert into onlineddl_test values (null, 11, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set dt2=now() + interval 1 minute, updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test 
values (null, 13, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set t=t + interval 1 minute, updated = 1 where i = 13 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp-pk/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/alter new file mode 100644 index 00000000000..7a057adfa1b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/alter @@ -0,0 +1 @@ +change column t t timestamp null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql new file mode 100644 index 00000000000..99018d8c798 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime-to-timestamp/create.sql @@ -0,0 +1,31 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int unsigned auto_increment, + i int not null, + ts0 timestamp default current_timestamp, + ts1 timestamp null, + dt2 datetime, + t datetime null, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 7, null, now(), now(), '2010-10-20 10:20:30', 0); + + insert into onlineddl_test values (null, 11, null, now(), now(), '2010-10-20 10:20:30', 0); 
+ update onlineddl_test set dt2=now() + interval 1 minute, updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set t=t + interval 1 minute, updated = 1 where i = 13 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime/create.sql new file mode 100644 index 00000000000..8ae020fa1ac --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime/create.sql @@ -0,0 +1,37 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + dt0 datetime default current_timestamp, + dt1 datetime, + dt2 datetime, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, null, now(), now(), 0); + update onlineddl_test set dt2=now() + interval 1 minute, updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, null, now(), now(), 0); + update onlineddl_test set dt2=now() + interval 1 minute, updated = 1 where i = 13 order by id desc limit 1; + + insert into onlineddl_test values (null, 17, null, now(), now(), 0); + update onlineddl_test set dt2=now() + interval 1 minute, updated = 1 where i = 17 order by id desc limit 1; + + insert into onlineddl_test values (null, 19, null, now(), now(), 0); + update onlineddl_test set dt2=now() + interval 1 minute, updated = 1 where i = 19 order by id desc limit 1; + + insert into onlineddl_test values (null, 23, null, now(), now(), 0); + update onlineddl_test set dt2=now() + interval 1 
minute, updated = 1 where i = 23 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/datetime/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/decimal/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/decimal/create.sql new file mode 100644 index 00000000000..0f47a86688e --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/decimal/create.sql @@ -0,0 +1,23 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + dec0 decimal(65,30) unsigned NOT NULL DEFAULT '0.000000000000000000000000000000', + dec1 decimal(65,30) unsigned NOT NULL DEFAULT '1.000000000000000000000000000000', + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 0.0, 0.0); + insert into onlineddl_test values (null, 2.0, 4.0); + insert into onlineddl_test values (null, 99999999999999999999999999999999999.000, 6.0); + update onlineddl_test set dec1=4.5 where dec2=4.0 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/after_columns new file mode 100644 index 00000000000..16f9ec009e5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/after_columns @@ -0,0 +1 @@ +c2 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/alter 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/alter new file mode 100644 index 00000000000..dece2e34f34 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/alter @@ -0,0 +1 @@ +drop column c1, add column c1 int not null default 47 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/before_columns new file mode 100644 index 00000000000..16f9ec009e5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/before_columns @@ -0,0 +1 @@ +c2 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/create.sql new file mode 100644 index 00000000000..1f22e6ba81d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/drop-null-add-not-null/create.sql @@ -0,0 +1,30 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int null, + c2 int not null, + primary key (id) +) auto_increment=1; + +insert into onlineddl_test values (null, null, 17); +insert into onlineddl_test values (null, null, 19); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (101, 11, 23); + insert ignore into onlineddl_test values (102, 13, 23); + insert into onlineddl_test values (null, 17, 23); + insert into onlineddl_test values (null, null, 29); + set @last_insert_id := last_insert_id(); + -- update onlineddl_test set c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-pk/create.sql new file mode 100644 index 00000000000..183b4159d6f --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-pk/create.sql @@ -0,0 +1,35 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue', 'orange', 'yellow', 'grey', 'black') not null default 'red' collate 'utf8_bin', + primary key(id, e) +) auto_increment=1; + +insert into onlineddl_test values (null, 2, 'yellow'); +insert into onlineddl_test values (null, 3, 'grey'); +insert into onlineddl_test values (11, 5, 'yellow'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + update onlineddl_test set e='black' where e='grey'; + update onlineddl_test set e='black' where id=11; + insert into onlineddl_test values (null, 11, 'red'); + set @last_insert_id := last_insert_id(); + insert into onlineddl_test values (@last_insert_id, 11, 'green'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); + set @last_insert_id := last_insert_id(); + update onlineddl_test set e='orange' where id = @last_insert_id; + insert into onlineddl_test values (null, 23, null); + set @last_insert_id := last_insert_id(); + update onlineddl_test set i=i+1, e=null where id = @last_insert_id; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/after_columns new file mode 100644 index 00000000000..25931ae510f --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/after_columns @@ -0,0 +1 @@ +id, i, e2 
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/alter new file mode 100644 index 00000000000..a6577ee09e9 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/alter @@ -0,0 +1 @@ +change e e2 varchar(32) not null default '' diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/before_columns new file mode 100644 index 00000000000..8dbd253ca71 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/before_columns @@ -0,0 +1 @@ +id, i, e diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/create.sql new file mode 100644 index 00000000000..109b4f63101 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar-rename/create.sql @@ -0,0 +1,26 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin', + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 7, 'red'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); + set @last_insert_id := last_insert_id(); + update onlineddl_test set e='orange' where id = @last_insert_id; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar/alter 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar/alter new file mode 100644 index 00000000000..a7f5412d310 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar/alter @@ -0,0 +1 @@ +change e e varchar(32) not null default '' diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar/create.sql new file mode 100644 index 00000000000..109b4f63101 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum-to-varchar/create.sql @@ -0,0 +1,26 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin', + primary key(id) +) auto_increment=1; + +insert into onlineddl_test values (null, 7, 'red'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); + set @last_insert_id := last_insert_id(); + update onlineddl_test set e='orange' where id = @last_insert_id; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum/alter new file mode 100644 index 00000000000..7dccd41a2be --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum/alter @@ -0,0 +1 @@ +change e e enum('red', 'green', 'blue', 'orange', 'yellow') collate 'utf8_bin' null default null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum/create.sql new file mode 100644 index 00000000000..bb4ed269532 --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/enum/create.sql @@ -0,0 +1,27 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + e enum('red', 'green', 'blue', 'orange') null default null collate 'utf8_bin', + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 'red'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); + set @last_insert_id := last_insert_id(); + update onlineddl_test set e='orange' where id = @last_insert_id; + insert into onlineddl_test values (null, 23, null); + set @last_insert_id := last_insert_id(); + update onlineddl_test set i=i+1, e=null where id = @last_insert_id; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/alter new file mode 100644 index 00000000000..2fbf26b1df4 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/alter @@ -0,0 +1 @@ +change id id int, drop primary key diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/create.sql new file mode 100644 index 00000000000..335caa37798 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts timestamp, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + 
interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, now()); + insert into onlineddl_test values (null, 13, now()); + insert into onlineddl_test values (null, 17, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/expect_failure new file mode 100644 index 00000000000..b66e92432da --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-drop-pk/expect_failure @@ -0,0 +1 @@ +Found no shared diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-child/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-child/create.sql new file mode 100644 index 00000000000..daa12a5b590 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-child/create.sql @@ -0,0 +1,34 @@ +set session foreign_key_checks=0; +drop table if exists onlineddl_test_child; +drop table if exists onlineddl_test; +drop table if exists onlineddl_test_parent; +set session foreign_key_checks=1; +create table onlineddl_test_parent ( + id int auto_increment, + ts timestamp, + primary key(id) +); +create table onlineddl_test ( + id int auto_increment, + i int not null, + parent_id int not null, + primary key(id), + constraint test_fk foreign key (parent_id) references onlineddl_test_parent (id) on delete no action +) auto_increment=1; + +insert into onlineddl_test_parent (id) values (1),(2),(3); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 1); + insert into onlineddl_test values (null, 13, 2); + insert into onlineddl_test values (null, 17, 3); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-child/expect_failure 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-child/expect_failure new file mode 100644 index 00000000000..53b8e8c8c32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-child/expect_failure @@ -0,0 +1 @@ +foreign key constraints are not supported in online DDL diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/create.sql new file mode 100644 index 00000000000..5e4638c9342 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/create.sql @@ -0,0 +1,34 @@ +set session foreign_key_checks=0; +drop table if exists onlineddl_test_child; +drop table if exists onlineddl_test; +drop table if exists onlineddl_test_parent; +set session foreign_key_checks=1; +create table onlineddl_test ( + id int auto_increment, + ts timestamp, + primary key(id) +); +create table onlineddl_test_child ( + id int auto_increment, + i int not null, + parent_id int not null, + primary key(id), + constraint test_fk foreign key (parent_id) references onlineddl_test (id) on delete no action +) auto_increment=1; + +insert into onlineddl_test (id) values (1),(2),(3); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test_child values (null, 11, 1); + insert into onlineddl_test_child values (null, 13, 2); + insert into onlineddl_test_child values (null, 17, 3); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure new file mode 100644 index 00000000000..53b8e8c8c32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-fk-parent/expect_failure @@ -0,0 +1 @@ +foreign key constraints are not 
supported in online DDL diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/alter new file mode 100644 index 00000000000..0d2477f5801 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/alter @@ -0,0 +1 @@ +add column v varchar(32) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/create.sql new file mode 100644 index 00000000000..abd7fbd4266 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/create.sql @@ -0,0 +1,11 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + f float, + i int not null, + ts timestamp default current_timestamp, + dt datetime, + key i_idx(i), + unique key f_uidx(f) +) auto_increment=1; + +drop event if exists onlineddl_test; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/expect_failure new file mode 100644 index 00000000000..b66e92432da --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-float-unique-key/expect_failure @@ -0,0 +1 @@ +Found no shared diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/alter new file mode 100644 index 00000000000..1bf49f74a95 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/alter @@ -0,0 +1 @@ +drop primary key, add primary key (id, i) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/create.sql new file mode 100644 index 00000000000..335caa37798 --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts timestamp, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, now()); + insert into onlineddl_test values (null, 13, now()); + insert into onlineddl_test values (null, 17, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/expect_failure new file mode 100644 index 00000000000..b66e92432da --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-shared-uk/expect_failure @@ -0,0 +1 @@ +Found no shared diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/alter new file mode 100644 index 00000000000..0d2477f5801 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/alter @@ -0,0 +1 @@ +add column v varchar(32) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/create.sql new file mode 100644 index 00000000000..e004d9e6922 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/create.sql @@ -0,0 +1,9 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + i int not null, + ts timestamp default current_timestamp, + dt datetime, + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/expect_failure new file mode 100644 index 00000000000..b66e92432da --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-no-unique-key/expect_failure @@ -0,0 +1 @@ +Found no shared diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/alter new file mode 100644 index 00000000000..c52c5d98c41 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/alter @@ -0,0 +1 @@ +drop column no_such_column diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/create.sql new file mode 100644 index 00000000000..02c45a91471 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/create.sql @@ -0,0 +1,13 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + color varchar(32), + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/expect_failure new file mode 100644 index 00000000000..de429a09409 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-nonexistent-column/expect_failure @@ -0,0 +1 @@ +errno 1091 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/alter new file mode 
100644 index 00000000000..8c2af8065ad --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/alter @@ -0,0 +1 @@ +rename as something_else diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/create.sql new file mode 100644 index 00000000000..335caa37798 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts timestamp, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, now()); + insert into onlineddl_test values (null, 13, now()); + insert into onlineddl_test values (null, 17, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/expect_query_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/expect_query_failure new file mode 100644 index 00000000000..56015aaf960 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-rename-table/expect_query_failure @@ -0,0 +1 @@ +RENAME is not supported in online DDL diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/alter new file mode 100644 index 00000000000..5a145fa7dce --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/alter @@ -0,0 +1 @@ +drop zzz zzz zzz zzz diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/create.sql new file 
mode 100644 index 00000000000..02c45a91471 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/create.sql @@ -0,0 +1,13 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + color varchar(32), + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/expect_query_failure b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/expect_query_failure new file mode 100644 index 00000000000..2d11e0ca7ba --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/fail-syntax-error/expect_query_failure @@ -0,0 +1 @@ +syntax error at position diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql new file mode 100644 index 00000000000..b9a14cdc156 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/create.sql @@ -0,0 +1,25 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int(11) NOT NULL AUTO_INCREMENT, + name varchar(512) DEFAULT NULL, + v varchar(255) DEFAULT NULL COMMENT '添加普通列测试', + PRIMARY KEY (id) +) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=gbk; + +insert into onlineddl_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试'); +insert into onlineddl_test values (null, 'gbk-test-initial', '添加普通列测试-添加普通列测试'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test (name) values ('gbk-test-default'); + insert into onlineddl_test 
values (null, 'gbk-test', '添加普通列测试-添加普通列测试'); + update onlineddl_test set v='添加普通列测试' where v='添加普通列测试-添加普通列测试' order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args b/go/test/endtoend/onlineddl/vrepl_suite/testdata/gbk-charset/extra_args new file mode 100644 index 00000000000..e69de29bb2d diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/after_columns new file mode 100644 index 00000000000..bd17155d303 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/after_columns @@ -0,0 +1 @@ +id, a, b diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/alter new file mode 100644 index 00000000000..b73b2f460fd --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/alter @@ -0,0 +1 @@ +add column sum_ab int as (a + b) virtual not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/before_columns new file mode 100644 index 00000000000..bd17155d303 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/before_columns @@ -0,0 +1 @@ +id, a, b diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/create.sql new file mode 100644 index 00000000000..c11ec898a61 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/create.sql @@ -0,0 +1,29 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + a int not null, + b int not null, + primary key(id) +) 
auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test (id, a, b) values (null, 2,3); + insert into onlineddl_test (id, a, b) values (null, 2,4); + insert into onlineddl_test (id, a, b) values (null, 2,5); + insert into onlineddl_test (id, a, b) values (null, 2,6); + insert into onlineddl_test (id, a, b) values (null, 2,7); + insert into onlineddl_test (id, a, b) values (null, 2,8); + insert into onlineddl_test (id, a, b) values (null, 2,9); + insert into onlineddl_test (id, a, b) values (null, 2,0); + insert into onlineddl_test (id, a, b) values (null, 2,1); + insert into onlineddl_test (id, a, b) values (null, 2,2); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/order_by b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/order_by new file mode 100644 index 00000000000..074d1eeb404 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-add57/order_by @@ -0,0 +1 @@ +id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/alter new file mode 100644 index 00000000000..3509b0a96fa --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/alter @@ -0,0 +1 @@ +change sum_ab total_ab int as (a + b) virtual not null diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/create.sql new file mode 100644 index 00000000000..b5d7445f4c1 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/create.sql @@ -0,0 +1,30 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + a int not null, + b int not null, + sum_ab int as (a + b) virtual not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test (id, a, b) values (null, 2,3); + insert into onlineddl_test (id, a, b) values (null, 2,4); + insert into onlineddl_test (id, a, b) values (null, 2,5); + insert into onlineddl_test (id, a, b) values (null, 2,6); + insert into onlineddl_test (id, a, b) values (null, 2,7); + insert into onlineddl_test (id, a, b) values (null, 2,8); + insert into onlineddl_test (id, a, b) values (null, 2,9); + insert into onlineddl_test (id, a, b) values (null, 2,0); + insert into onlineddl_test (id, a, b) values (null, 2,1); + insert into onlineddl_test (id, a, b) values (null, 2,2); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns-rename57/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57-unique/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57-unique/create.sql new file mode 100644 
index 00000000000..90d0c118729 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57-unique/create.sql @@ -0,0 +1,30 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + `idb` varchar(36) CHARACTER SET utf8mb4 GENERATED ALWAYS AS (json_unquote(json_extract(`jsonobj`,_utf8mb4'$._id'))) STORED NOT NULL, + `jsonobj` json NOT NULL, + PRIMARY KEY (`id`,`idb`) +) auto_increment=1; + +insert into onlineddl_test (id, jsonobj) values (null, '{"_id":2}'); +insert into onlineddl_test (id, jsonobj) values (null, '{"_id":3}'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":5}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":7}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":11}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":13}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":17}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":19}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":23}'); + insert into onlineddl_test (id, jsonobj) values (null, '{"_id":27}'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57-unique/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57-unique/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57-unique/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57/create.sql new file mode 100644 
index 00000000000..b5d7445f4c1 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57/create.sql @@ -0,0 +1,30 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + a int not null, + b int not null, + sum_ab int as (a + b) virtual not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test (id, a, b) values (null, 2,3); + insert into onlineddl_test (id, a, b) values (null, 2,4); + insert into onlineddl_test (id, a, b) values (null, 2,5); + insert into onlineddl_test (id, a, b) values (null, 2,6); + insert into onlineddl_test (id, a, b) values (null, 2,7); + insert into onlineddl_test (id, a, b) values (null, 2,8); + insert into onlineddl_test (id, a, b) values (null, 2,9); + insert into onlineddl_test (id, a, b) values (null, 2,0); + insert into onlineddl_test (id, a, b) values (null, 2,1); + insert into onlineddl_test (id, a, b) values (null, 2,2); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/generated-columns57/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/geometry57/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/geometry57/create.sql new file mode 100644 index 00000000000..df366717910 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/geometry57/create.sql @@ -0,0 +1,21 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + g geometry, + primary 
key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, ST_GeomFromText('POINT(1 1)')); + insert into onlineddl_test values (null, ST_GeomFromText('POINT(2 2)')); + insert into onlineddl_test values (null, ST_GeomFromText('POINT(3 3)')); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/geometry57/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/geometry57/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/geometry57/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57/create.sql new file mode 100644 index 00000000000..623e3140eb7 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57/create.sql @@ -0,0 +1,21 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + j json, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, '"sometext"'); + insert into onlineddl_test values (null, '{"key":"val"}'); + insert into onlineddl_test values (null, '{"is-it": true, "count": 3, "elements": []}'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57dml/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57dml/create.sql new file mode 100644 index 00000000000..819f38c938f --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57dml/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + updated tinyint not null default 0, + j json, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test (id, i, j) values (null, 11, '"sometext"'); + insert into onlineddl_test (id, i, j) values (null, 13, '{"key":"val"}'); + insert into onlineddl_test (id, i, j) values (null, 17, '{"is-it": true, "count": 3, "elements": []}'); + insert into onlineddl_test (id, i, j) values (null, 19, '{"text":"Lorem ipsum dolor sit amet, consectetuer adipiscing elit. Aenean commodo ligula eget dolor. Aenean massa. Cum sociis natoque penatibus et magnis dis parturient montes, nascetur ridiculus mus. Donec quam felis, ultricies nec, pellentesque eu, pretium quis, sem. Nulla consequat massa quis enim. Donec pede justo, fringilla vel, aliquet nec, vulputate eget, arcu. In enim justo, rhoncus ut, imperdiet a, venenatis vitae, justo. Nullam dictum felis eu pede mollis pretium. Integer tincidunt. Cras dapibus. Vivamus elementum semper nisi. Aenean vulputate eleifend tellus. Aenean leo ligula, porttitor eu, consequat vitae, eleifend ac, enim. Aliquam lorem ante, dapibus in, viverra quis, feugiat a, tellus. Phasellus viverra nulla ut metus varius laoreet. Quisque rutrum. Aenean imperdiet. Etiam ultricies nisi vel augue. 
Curabitur ullamcorper ultricies nisi. Nam eget dui. Etiam rhoncus. Maecenas tempus, tellus eget condimentum rhoncus, sem quam semper libero, sit amet adipiscing sem neque sed ipsum. Nam quam nunc, blandit vel, luctus pulvinar, hendrerit id, lorem. Maecenas nec odio et ante tincidunt tempus. Donec vitae sapien ut libero venenatis faucibus. Nullam quis ante. Etiam sit amet orci eget eros faucibus tincidunt. Duis leo. Sed fringilla mauris sit amet nibh. Donec sodales sagittis magna. Sed consequat, leo eget bibendum sodales, augue velit cursus nunc, quis gravida magna mi a libero. Fusce vulputate eleifend sapien. Vestibulum purus quam, scelerisque ut, mollis sed, nonummy id, metus. Nullam accumsan lorem in dui. Cras ultricies mi eu turpis hendrerit fringilla. Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; In ac dui quis mi consectetuer lacinia. Nam pretium turpis et arcu. Duis arcu tortor, suscipit eget, imperdiet nec, imperdiet iaculis, ipsum. Sed aliquam ultrices mauris. Integer ante arcu, accumsan a, consectetuer eget, posuere ut, mauris. Praesent adipiscing. Phasellus ullamcorper ipsum rutrum nunc. Nunc nonummy metus. Vestibulum volutpat pretium libero. Cras id dui. Aenean ut eros et nisl sagittis vestibulum. Nullam nulla eros, ultricies sit amet, nonummy id, imperdiet feugiat, pede. Sed lectus. Donec mollis hendrerit risus. Phasellus nec sem in justo pellentesque facilisis. Etiam imperdiet imperdiet orci. Nunc nec neque. Phasellus leo dolor, tempus non, auctor et, hendrerit quis, nisi. Curabitur ligula sapien, tincidunt non, euismod vitae, posuere imperdiet, leo. Maecenas malesuada. Praesent congue erat at massa. Sed cursus turpis vitae tortor. Donec posuere vulputate arcu. Phasellus accumsan cursus velit. 
Vestibulum ante ipsum primis in faucibus orci luctus et ultrices posuere cubilia Curae; Sed aliquam, nisi quis porttitor congue, elit erat euismod orci, ac"}'); + + update onlineddl_test set j = '{"updated": 11}', updated = 1 where i = 11 and updated = 0; + update onlineddl_test set j = json_set(j, '$.count', 13, '$.id', id), updated = 1 where i = 13 and updated = 0; + delete from onlineddl_test where i = 17; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57dml/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57dml/ignore_versions new file mode 100644 index 00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/json57dml/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/after_columns new file mode 100644 index 00000000000..f5941f36da2 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/after_columns @@ -0,0 +1 @@ +id, i, color diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/alter new file mode 100644 index 00000000000..2a81d11f0f4 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/alter @@ -0,0 +1 @@ +add column `index` int unsigned diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/before_columns new file mode 100644 index 00000000000..f5941f36da2 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/before_columns @@ -0,0 +1 @@ +id, i, color diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/create.sql new file mode 100644 index 
00000000000..02c45a91471 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/keyword-column/create.sql @@ -0,0 +1,13 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + color varchar(32), + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/latin1/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/latin1/create.sql new file mode 100644 index 00000000000..1efaf4641b0 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/latin1/create.sql @@ -0,0 +1,25 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t varchar(128) charset latin1 collate latin1_swedish_ci, + primary key(id) +) auto_increment=1 charset latin1 collate latin1_swedish_ci; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand())); + insert into onlineddl_test values (null, 'átesting'); + insert into onlineddl_test values (null, 'ádelete'); + insert into onlineddl_test values (null, 'testátest'); + update onlineddl_test set t='áupdated' order by id desc limit 1; + update onlineddl_test set t='áupdated1' where t='áupdated' order by id desc limit 1; + delete from onlineddl_test where t='ádelete'; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/latin1text/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/latin1text/create.sql new file mode 100644 index 00000000000..6c1a71b519c --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/latin1text/create.sql @@ -0,0 +1,25 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t text charset latin1 collate latin1_swedish_ci, + primary key(id) +) auto_increment=1 charset latin1 collate latin1_swedish_ci; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand())); + insert into onlineddl_test values (null, 'átesting'); + insert into onlineddl_test values (null, 'ádelete'); + insert into onlineddl_test values (null, 'testátest'); + update onlineddl_test set t='áupdated' order by id desc limit 1; + update onlineddl_test set t='áupdated1' where t='áupdated' order by id desc limit 1; + delete from onlineddl_test where t='ádelete'; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/mixed-charset/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/mixed-charset/create.sql new file mode 100644 index 00000000000..914e6a7ca95 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/mixed-charset/create.sql @@ -0,0 +1,23 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t varchar(128) charset latin1 collate latin1_swedish_ci, + tutf8 varchar(128) charset utf8, + tutf8mb4 varchar(128) charset utf8mb4, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand()), md5(rand()), md5(rand())); + insert into onlineddl_test values (null, 'átesting', 'átesting', 'átesting'); + insert into 
onlineddl_test values (null, 'testátest', 'testátest', '🍻😀'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case-pk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case-pk/alter new file mode 100644 index 00000000000..066298972f8 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case-pk/alter @@ -0,0 +1 @@ +modify ID int diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case-pk/create.sql new file mode 100644 index 00000000000..ffb6339d707 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case-pk/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null default 0, + c2 int not null default 0, + primary key (id) +) auto_increment=1; + +insert into onlineddl_test values (97, 7, 23); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23); + insert ignore into onlineddl_test values (2, 13, 23); + insert into onlineddl_test values (null, 17, 23); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case/alter new file mode 100644 index 00000000000..e0c50f09092 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case/alter @@ -0,0 +1 @@ +modify C2 
int not null default 0 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case/create.sql new file mode 100644 index 00000000000..34283332fdc --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/modify-change-case/create.sql @@ -0,0 +1,26 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null default 0, + c2 int not null default 0, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23); + insert ignore into onlineddl_test values (2, 13, 23); + insert into onlineddl_test values (null, 17, 23); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-inserts-only/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-inserts-only/alter new file mode 100644 index 00000000000..ff40a87b5c3 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-inserts-only/alter @@ -0,0 +1 @@ +change column c2 c3 int not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-inserts-only/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-inserts-only/create.sql new file mode 100644 index 00000000000..84207397ded --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-inserts-only/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int 
auto_increment, + c1 int not null, + c2 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, 23); + insert into onlineddl_test values (null, 13, 23); + insert into onlineddl_test values (null, floor(rand()*pow(2,32)), floor(rand()*pow(2,32))); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-column/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-column/alter new file mode 100644 index 00000000000..99730c09c9d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-column/alter @@ -0,0 +1 @@ +add column exchange double comment 'exchange rate used for pay in your own currency' diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-column/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-column/create.sql new file mode 100644 index 00000000000..b170ffd1fc0 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-column/create.sql @@ -0,0 +1,8 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-comment/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-comment/alter new file mode 100644 index 00000000000..40835ed50ec --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-comment/alter @@ -0,0 +1 @@ +add column exchange_rate double comment 'change rate used for pay in your own currency' diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-comment/create.sql 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-comment/create.sql new file mode 100644 index 00000000000..b170ffd1fc0 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-none-comment/create.sql @@ -0,0 +1,8 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/after_columns new file mode 100644 index 00000000000..bf608b3bfe4 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/after_columns @@ -0,0 +1 @@ +id, c1, c2a diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/alter new file mode 100644 index 00000000000..d8691f50a85 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/alter @@ -0,0 +1 @@ +change column c2 c2a int not null after id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/before_columns new file mode 100644 index 00000000000..e1b87089408 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/before_columns @@ -0,0 +1 @@ +id, c1, c2 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/create.sql new file mode 100644 index 00000000000..a307df9e954 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-column/create.sql @@ -0,0 +1,26 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int 
not null, + c2 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23); + insert ignore into onlineddl_test values (2, 13, 23); + insert into onlineddl_test values (null, 17, 23); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/after_columns new file mode 100644 index 00000000000..a17380e105a --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/after_columns @@ -0,0 +1 @@ +id, c1, c2a, c3 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/alter new file mode 100644 index 00000000000..cf8dfb98d2b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/alter @@ -0,0 +1 @@ +change column c2 c2a int not null, change column c3 c3 int not null after id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/before_columns new file mode 100644 index 00000000000..f1eeeb79164 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/before_columns @@ -0,0 +1 @@ +id, c1, c2, c3 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/create.sql 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/create.sql new file mode 100644 index 00000000000..3d586a8c013 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename-reorder-columns/create.sql @@ -0,0 +1,27 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null, + c2 int not null, + c3 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23, 97); + insert ignore into onlineddl_test values (2, 13, 27, 61); + insert into onlineddl_test values (null, 17, 31, 53); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id, c3=c3+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename/alter new file mode 100644 index 00000000000..ff40a87b5c3 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename/alter @@ -0,0 +1 @@ +change column c2 c3 int not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename/create.sql new file mode 100644 index 00000000000..a307df9e954 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/rename/create.sql @@ -0,0 +1,26 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null, + c2 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create 
event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23); + insert ignore into onlineddl_test values (2, 13, 23); + insert into onlineddl_test values (null, 17, 23); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/after_columns new file mode 100644 index 00000000000..e1b87089408 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/after_columns @@ -0,0 +1 @@ +id, c1, c2 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/alter new file mode 100644 index 00000000000..7704ff6ed14 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/alter @@ -0,0 +1 @@ +change column c2 c2 int not null after id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/before_columns new file mode 100644 index 00000000000..e1b87089408 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/before_columns @@ -0,0 +1 @@ +id, c1, c2 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/create.sql new file mode 100644 index 00000000000..3bc6f42b6c3 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/reorder-columns/create.sql @@ -0,0 +1,26 @@ +drop table if 
exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null, + c2 int not null, + primary key (id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23); + insert ignore into onlineddl_test values (2, 13, 29); + insert into onlineddl_test values (null, 17, 31); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/spatial57/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/spatial57/create.sql new file mode 100644 index 00000000000..9e26c32fd5d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/spatial57/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + g geometry, + pt point, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, ST_GeomFromText('POINT(1 1)'), POINT(10,10)); + insert into onlineddl_test values (null, ST_GeomFromText('POINT(2 2)'), POINT(20,20)); + insert into onlineddl_test values (null, ST_GeomFromText('POINT(3 3)'), POINT(30,30)); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/spatial57/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/spatial57/ignore_versions new file mode 100644 index 
00000000000..b6de5f8d9f5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/spatial57/ignore_versions @@ -0,0 +1 @@ +(5.5|5.6) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/swap-uk/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/swap-uk/alter new file mode 100644 index 00000000000..bac15ca1769 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/swap-uk/alter @@ -0,0 +1 @@ +drop primary key, add unique key(id) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/swap-uk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/swap-uk/create.sql new file mode 100644 index 00000000000..335caa37798 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/swap-uk/create.sql @@ -0,0 +1,22 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts timestamp, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, now()); + insert into onlineddl_test values (null, 13, now()); + insert into onlineddl_test values (null, 17, now()); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/test.sh b/go/test/endtoend/onlineddl/vrepl_suite/testdata/test.sh new file mode 100644 index 00000000000..59ea60ac844 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/test.sh @@ -0,0 +1,270 @@ +#!/bin/bash + +# Local integration tests. To be used by CI. +# See https://github.com/github/gh-ost/tree/doc/local-tests.md +# + +# Usage: localtests/test.sh [filter] +# By default, runs all tests. 
Given filter, will only run tests matching given regexp + +tests_path=$(dirname $0) +test_logfile=/tmp/gh-ost-test.log +default_ghost_binary=/tmp/gh-ost-test +ghost_binary="" +exec_command_file=/tmp/gh-ost-test.bash +ghost_structure_output_file=/tmp/gh-ost-test.ghost.structure.sql +orig_content_output_file=/tmp/gh-ost-test.orig.content.csv +ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv +throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag + +master_host= +master_port= +replica_host= +replica_port= +original_sql_mode= + +OPTIND=1 +while getopts "b:" OPTION +do + case $OPTION in + b) + ghost_binary="$OPTARG" + ;; + esac +done +shift $((OPTIND-1)) + +test_pattern="${1:-.}" + +verify_master_and_replica() { + if [ "$(gh-ost-test-mysql-master -e "select 1" -ss)" != "1" ] ; then + echo "Cannot verify gh-ost-test-mysql-master" + exit 1 + fi + read master_host master_port <<< $(gh-ost-test-mysql-master -e "select @@hostname, @@port" -ss) + [ "$master_host" == "$(hostname)" ] && master_host="127.0.0.1" + echo "# master verified at $master_host:$master_port" + if ! gh-ost-test-mysql-master -e "set global event_scheduler := 1" ; then + echo "Cannot enable event_scheduler on master" + exit 1 + fi + original_sql_mode="$(gh-ost-test-mysql-master -e "select @@global.sql_mode" -s -s)" + echo "sql_mode on master is ${original_sql_mode}" + + echo "Gracefully sleeping for 3 seconds while replica is setting up..." 
+ sleep 3 + + if [ "$(gh-ost-test-mysql-replica -e "select 1" -ss)" != "1" ] ; then + echo "Cannot verify gh-ost-test-mysql-replica" + exit 1 + fi + if [ "$(gh-ost-test-mysql-replica -e "select @@global.binlog_format" -ss)" != "ROW" ] ; then + echo "Expecting test replica to have binlog_format=ROW" + exit 1 + fi + read replica_host replica_port <<< $(gh-ost-test-mysql-replica -e "select @@hostname, @@port" -ss) + [ "$replica_host" == "$(hostname)" ] && replica_host="127.0.0.1" + echo "# replica verified at $replica_host:$replica_port" +} + +exec_cmd() { + echo "$@" + command "$@" 1> $test_logfile 2>&1 + return $? +} + +echo_dot() { + echo -n "." +} + +start_replication() { + gh-ost-test-mysql-replica -e "stop slave; start slave;" + num_attempts=0 + while gh-ost-test-mysql-replica -e "show slave status\G" | grep Seconds_Behind_Master | grep -q NULL ; do + ((num_attempts=num_attempts+1)) + if [ $num_attempts -gt 10 ] ; then + echo + echo "ERROR replication failure" + exit 1 + fi + echo_dot + sleep 1 + done +} + +test_single() { + local test_name + test_name="$1" + + if [ -f $tests_path/$test_name/ignore_versions ] ; then + ignore_versions=$(cat $tests_path/$test_name/ignore_versions) + mysql_version=$(gh-ost-test-mysql-master -s -s -e "select @@version") + if echo "$mysql_version" | egrep -q "^${ignore_versions}" ; then + echo -n "Skipping: $test_name" + return 0 + fi + fi + + echo -n "Testing: $test_name" + + echo_dot + start_replication + echo_dot + + if [ -f $tests_path/$test_name/sql_mode ] ; then + gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'" + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'" + fi + + gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/create.sql + + extra_args="" + if [ -f $tests_path/$test_name/extra_args ] ; then + extra_args=$(cat 
$tests_path/$test_name/extra_args) + fi + orig_columns="*" + ghost_columns="*" + order_by="" + if [ -f $tests_path/$test_name/orig_columns ] ; then + orig_columns=$(cat $tests_path/$test_name/orig_columns) + fi + if [ -f $tests_path/$test_name/ghost_columns ] ; then + ghost_columns=$(cat $tests_path/$test_name/ghost_columns) + fi + if [ -f $tests_path/$test_name/order_by ] ; then + order_by="order by $(cat $tests_path/$test_name/order_by)" + fi + # graceful sleep for replica to catch up + echo_dot + sleep 1 + # + cmd="$ghost_binary \ + --user=gh-ost \ + --password=gh-ost \ + --host=$replica_host \ + --port=$replica_port \ + --assume-master-host=${master_host}:${master_port} + --database=test \ + --table=onlineddl_test \ + --alter='engine=innodb' \ + --exact-rowcount \ + --assume-rbr \ + --initially-drop-old-table \ + --initially-drop-ghost-table \ + --throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc' \ + --throttle-flag-file=$throttle_flag_file \ + --serve-socket-file=/tmp/gh-ost.test.sock \ + --initially-drop-socket-file \ + --test-on-replica \ + --default-retries=3 \ + --chunk-size=10 \ + --verbose \ + --debug \ + --stack \ + --execute ${extra_args[@]}" + echo_dot + echo $cmd > $exec_command_file + echo_dot + bash $exec_command_file 1> $test_logfile 2>&1 + + execution_result=$? + + if [ -f $tests_path/$test_name/sql_mode ] ; then + gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'" + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'" + fi + + if [ -f $tests_path/$test_name/destroy.sql ] ; then + gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/destroy.sql + fi + + if [ -f $tests_path/$test_name/expect_failure ] ; then + if [ $execution_result -eq 0 ] ; then + echo + echo "ERROR $test_name execution was expected to exit on error but did not. 
cat $test_logfile" + return 1 + fi + if [ -s $tests_path/$test_name/expect_failure ] ; then + # 'expect_failure' file has content. We expect to find this content in the log. + expected_error_message="$(cat $tests_path/$test_name/expect_failure)" + if grep -q "$expected_error_message" $test_logfile ; then + return 0 + fi + echo + echo "ERROR $test_name execution was expected to exit with error message '${expected_error_message}' but did not. cat $test_logfile" + return 1 + fi + # 'expect_failure' file has no content. We generally agree that the failure is correct + return 0 + fi + + if [ $execution_result -ne 0 ] ; then + echo + echo "ERROR $test_name execution failure. cat $test_logfile:" + cat $test_logfile + return 1 + fi + + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "show create table _gh_ost_test_gho\G" -ss > $ghost_structure_output_file + + if [ -f $tests_path/$test_name/expect_table_structure ] ; then + expected_table_structure="$(cat $tests_path/$test_name/expect_table_structure)" + if ! grep -q "$expected_table_structure" $ghost_structure_output_file ; then + echo + echo "ERROR $test_name: table structure was expected to include ${expected_table_structure} but did not. 
cat $ghost_structure_output_file:" + cat $ghost_structure_output_file + return 1 + fi + fi + + echo_dot + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from onlineddl_test ${order_by}" -ss > $orig_content_output_file + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss > $ghost_content_output_file + orig_checksum=$(cat $orig_content_output_file | md5sum) + ghost_checksum=$(cat $ghost_content_output_file | md5sum) + + if [ "$orig_checksum" != "$ghost_checksum" ] ; then + echo "ERROR $test_name: checksum mismatch" + echo "---" + diff $orig_content_output_file $ghost_content_output_file + + echo "diff $orig_content_output_file $ghost_content_output_file" + + return 1 + fi +} + +build_binary() { + echo "Building" + rm -f $default_ghost_binary + [ "$ghost_binary" == "" ] && ghost_binary="$default_ghost_binary" + if [ -f "$ghost_binary" ] ; then + echo "Using binary: $ghost_binary" + return 0 + fi + go build -o $ghost_binary go/cmd/gh-ost/main.go + if [ $? -ne 0 ] ; then + echo "Build failure" + exit 1 + fi +} + +test_all() { + build_binary + find $tests_path ! -path . -type d -mindepth 1 -maxdepth 1 | cut -d "/" -f 3 | egrep "$test_pattern" | while read test_name ; do + test_single "$test_name" + if [ $? 
-ne 0 ] ; then + create_statement=$(gh-ost-test-mysql-replica test -t -e "show create table _gh_ost_test_gho \G") + echo "$create_statement" >> $test_logfile + echo "+ FAIL" + return 1 + else + echo + echo "+ pass" + fi + gh-ost-test-mysql-replica -e "start slave" + done +} + +verify_master_and_replica +test_all diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-datetime/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-datetime/create.sql new file mode 100644 index 00000000000..dc76c589e01 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-datetime/create.sql @@ -0,0 +1,33 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts timestamp default current_timestamp, + dt datetime, + ts2ts timestamp null, + ts2dt datetime null, + dt2ts timestamp null, + dt2dt datetime null, + updated tinyint unsigned default 0, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, now(), now(),null, null, null, null, 0); + update onlineddl_test set ts2ts=ts, ts2dt=ts, dt2ts=dt, dt2dt=dt where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, null, now(), now(), 0); + update onlineddl_test set ts2ts=ts, ts2dt=ts, dt2ts=dt, dt2dt=dt where i = 13 order by id desc limit 1; + + insert into onlineddl_test values (null, 17, null, '2016-07-06 10:20:30', '2016-07-06 10:20:30', 0); + update onlineddl_test set ts2ts=ts, ts2dt=ts, dt2ts=dt, dt2dt=dt where i = 17 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/alter new file mode 
100644 index 00000000000..da18e565c10 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/alter @@ -0,0 +1 @@ +change column t t datetime not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/create.sql new file mode 100644 index 00000000000..d2741b4d652 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/create.sql @@ -0,0 +1,31 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts0 timestamp default current_timestamp, + ts1 timestamp default current_timestamp, + dt2 datetime, + t datetime, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 7, null, now(), now(), '2010-10-20 10:20:30', 0); + + insert into onlineddl_test values (null, 11, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 13 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp-to-datetime/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp/create.sql new file mode 100644 index 00000000000..29ed069b29b --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp/create.sql @@ -0,0 +1,37 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts0 timestamp default current_timestamp, + ts1 timestamp default current_timestamp, + ts2 timestamp default current_timestamp, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 11 order by id desc limit 1; + + insert into onlineddl_test values (null, 13, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 13 order by id desc limit 1; + + insert into onlineddl_test values (null, 17, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 17 order by id desc limit 1; + + insert into onlineddl_test values (null, 19, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 19 order by id desc limit 1; + + insert into onlineddl_test values (null, 23, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 23 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp/ignore_versions new file mode 100644 index 
00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/timestamp/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/trivial/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/trivial/create.sql new file mode 100644 index 00000000000..02c45a91471 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/trivial/create.sql @@ -0,0 +1,13 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + color varchar(32), + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; + +insert into onlineddl_test values (null, 11, 'red'); +insert into onlineddl_test values (null, 13, 'green'); +insert into onlineddl_test values (null, 17, 'blue'); diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/trivial/extra_args b/go/test/endtoend/onlineddl/vrepl_suite/testdata/trivial/extra_args new file mode 100644 index 00000000000..8b6320aa1d7 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/trivial/extra_args @@ -0,0 +1 @@ +--throttle-query='select false' \ diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/alter new file mode 100644 index 00000000000..ab25b649a8a --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/alter @@ -0,0 +1 @@ +change column t t timestamp not null default current_timestamp diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/create.sql new file mode 100644 index 00000000000..bacbcee1f10 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/create.sql @@ -0,0 +1,44 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts0 timestamp default 
current_timestamp, + ts1 timestamp default current_timestamp, + dt2 datetime, + t datetime, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 7, null, now(), now(), '2010-10-20 10:20:30', 0); + + insert into onlineddl_test values (null, 11, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 11 order by id desc limit 1; + + set session time_zone='system'; + insert into onlineddl_test values (null, 13, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 13 order by id desc limit 1; + + set session time_zone='+00:00'; + insert into onlineddl_test values (null, 17, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 17 order by id desc limit 1; + + set session time_zone='-03:00'; + insert into onlineddl_test values (null, 19, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 19 order by id desc limit 1; + + set session time_zone='+05:00'; + insert into onlineddl_test values (null, 23, null, now(), now(), '2010-10-20 10:20:30', 0); + update onlineddl_test set ts2=now() + interval 1 minute, updated = 1 where i = 23 order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime-ts/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime/create.sql new file mode 100644 index 00000000000..3a3a812f75d --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz-datetime/create.sql @@ -0,0 +1,41 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts0 timestamp default current_timestamp, + ts1 datetime, + ts2 datetime, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 11 order by id desc limit 1; + + set session time_zone='system'; + insert into onlineddl_test values (null, 13, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 13 order by id desc limit 1; + + set session time_zone='+00:00'; + insert into onlineddl_test values (null, 17, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 17 order by id desc limit 1; + + set session time_zone='-03:00'; + insert into onlineddl_test values (null, 19, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 19 order by id desc limit 1; + + set session time_zone='+05:00'; + insert into onlineddl_test values (null, 23, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 23 order by id desc limit 1; +end ;; 
diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz/create.sql new file mode 100644 index 00000000000..2144cb6a51c --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz/create.sql @@ -0,0 +1,41 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + ts0 timestamp default current_timestamp, + ts1 timestamp default current_timestamp, + ts2 timestamp default current_timestamp, + updated tinyint unsigned default 0, + primary key(id), + key i_idx(i) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, 11, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 11 order by id desc limit 1; + + set session time_zone='system'; + insert into onlineddl_test values (null, 13, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 13 order by id desc limit 1; + + set session time_zone='+00:00'; + insert into onlineddl_test values (null, 17, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 17 order by id desc limit 1; + + set session time_zone='-03:00'; + insert into onlineddl_test values (null, 19, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 19 order by id desc limit 1; + + set session time_zone='+05:00'; + insert into onlineddl_test values (null, 23, null, now(), now(), 0); + update onlineddl_test set ts2=now() + interval 10 minute, updated = 1 where i = 23 order by id desc limit 1; +end ;; diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/tz/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-modify/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-modify/create.sql new file mode 100644 index 00000000000..41de8963783 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-modify/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id bigint(20) NOT NULL AUTO_INCREMENT, + column1 int(11) NOT NULL, + column2 smallint(5) unsigned NOT NULL, + column3 mediumint(8) unsigned NOT NULL, + column4 tinyint(3) unsigned NOT NULL, + column5 int(11) NOT NULL, + column6 int(11) NOT NULL, + PRIMARY KEY (id), + KEY c12_ix (column1, column2) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + -- mediumint maxvalue: 16777215 (unsigned), 8388607 (signed) + insert into onlineddl_test values (NULL, 13382498, 536, 8388607, 3, 1483892217, 1483892218); + insert into onlineddl_test values (NULL, 13382498, 536, 8388607, 250, 1483892217, 1483892218); + insert into onlineddl_test values (NULL, 13382498, 536, 10000000, 3, 1483892217, 1483892218); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/after_columns new file mode 100644 index 00000000000..cdee3f16d06 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/after_columns @@ -0,0 +1 @@ +id, i, bi, iu_renamed, biu diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/alter new file mode 100644 index 00000000000..d4f4967c32e --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/alter @@ -0,0 +1 @@ +change column iu iu_renamed int unsigned not null diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/before_columns new file mode 100644 index 00000000000..96a3ec84ae8 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/before_columns @@ -0,0 +1 @@ +id, i, bi, iu, biu diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/create.sql new file mode 100644 index 00000000000..5a5f3baed04 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-rename/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + bi bigint not null, + iu int unsigned not null, + biu bigint unsigned not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, -2147483647, -9223372036854775807, 4294967295, 18446744073709551615); + set @last_insert_id := cast(last_insert_id() as signed); + update onlineddl_test set i=-2147483647+@last_insert_id, bi=-9223372036854775807+@last_insert_id, iu=4294967295-@last_insert_id, biu=18446744073709551615-@last_insert_id where id < @last_insert_id order by id desc limit 1; +end ;; diff --git 
a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/after_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/after_columns new file mode 100644 index 00000000000..96a3ec84ae8 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/after_columns @@ -0,0 +1 @@ +id, i, bi, iu, biu diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/alter b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/alter new file mode 100644 index 00000000000..43ce5819a2c --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/alter @@ -0,0 +1 @@ +change column iu iu int unsigned not null after id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/before_columns b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/before_columns new file mode 100644 index 00000000000..96a3ec84ae8 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/before_columns @@ -0,0 +1 @@ +id, i, bi, iu, biu diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/create.sql new file mode 100644 index 00000000000..5a5f3baed04 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned-reorder/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + bi bigint not null, + iu int unsigned not null, + biu bigint unsigned not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, -2147483647, -9223372036854775807, 4294967295, 18446744073709551615); + set 
@last_insert_id := cast(last_insert_id() as signed); + update onlineddl_test set i=-2147483647+@last_insert_id, bi=-9223372036854775807+@last_insert_id, iu=4294967295-@last_insert_id, biu=18446744073709551615-@last_insert_id where id < @last_insert_id order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned/create.sql new file mode 100644 index 00000000000..5a5f3baed04 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/unsigned/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + bi bigint not null, + iu int unsigned not null, + biu bigint unsigned not null, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, -2147483647, -9223372036854775807, 4294967295, 18446744073709551615); + set @last_insert_id := cast(last_insert_id() as signed); + update onlineddl_test set i=-2147483647+@last_insert_id, bi=-9223372036854775807+@last_insert_id, iu=4294967295-@last_insert_id, biu=18446744073709551615-@last_insert_id where id < @last_insert_id order by id desc limit 1; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/update-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/update-pk/create.sql new file mode 100644 index 00000000000..d8b304b000a --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/update-pk/create.sql @@ -0,0 +1,33 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + i int not null, + color varchar(32) not null default '', + primary key(id, color) +) auto_increment=1; + +insert into 
onlineddl_test values (null, 5, 'grey'); +insert into onlineddl_test values (null, 7, 'yellow'); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + update onlineddl_test set color='dark grey' where i = 5; + insert into onlineddl_test values (null, 11, 'red'); + set @last_insert_id := last_insert_id(); + insert into onlineddl_test values (@last_insert_id, 11, 'green'); + insert into onlineddl_test values (null, 13, 'green'); + insert into onlineddl_test values (null, 17, 'blue'); + set @last_insert_id := last_insert_id(); + update onlineddl_test set color='orange' where id = @last_insert_id; + insert into onlineddl_test values (null, 23, null); + set @last_insert_id := last_insert_id(); + update onlineddl_test set i=i+1, color='black' where id = @last_insert_id; +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/utf8/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/utf8/create.sql new file mode 100644 index 00000000000..e17bbeba5e3 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/utf8/create.sql @@ -0,0 +1,21 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t varchar(128) charset utf8 collate utf8_general_ci, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand())); + insert into onlineddl_test values (null, 'novo proprietário'); + insert into onlineddl_test values (null, 'usuário'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/utf8mb4/create.sql 
b/go/test/endtoend/onlineddl/vrepl_suite/testdata/utf8mb4/create.sql new file mode 100644 index 00000000000..f3ab6b52cb5 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/utf8mb4/create.sql @@ -0,0 +1,21 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + t varchar(128) charset utf8mb4, + primary key(id) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values (null, md5(rand())); + insert into onlineddl_test values (null, 'átesting'); + insert into onlineddl_test values (null, '🍻😀'); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/testdata/varbinary/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/testdata/varbinary/create.sql new file mode 100644 index 00000000000..73fb605145e --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/testdata/varbinary/create.sql @@ -0,0 +1,40 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id binary(16) NOT NULL, + info varchar(255) COLLATE utf8_unicode_ci NOT NULL, + data binary(8) NOT NULL, + primary key (id), + unique key info_uidx (info) +) auto_increment=1; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + replace into onlineddl_test (id, info, data) values (X'12ffffffffffffffffffffffffffff00', 'item 1a', X'12ffffffffffffff'); + replace into onlineddl_test (id, info, data) values (X'34ffffffffffffffffffffffffffffff', 'item 3a', X'34ffffffffffffff'); + replace into onlineddl_test (id, info, data) values (X'90ffffffffffffffffffffffffffffff', 'item 9a', X'90ffffffffffff00'); + + DELETE FROM onlineddl_test 
WHERE id = X'11ffffffffffffffffffffffffffff00'; + UPDATE onlineddl_test SET info = 'item 2++' WHERE id = X'22ffffffffffffffffffffffffffff00'; + UPDATE onlineddl_test SET info = 'item 3++', data = X'33ffffffffffff00' WHERE id = X'33ffffffffffffffffffffffffffffff'; + DELETE FROM onlineddl_test WHERE id = X'44ffffffffffffffffffffffffffffff'; + UPDATE onlineddl_test SET info = 'item 5++', data = X'55ffffffffffffee' WHERE id = X'55ffffffffffffffffffffffffffffff'; + INSERT INTO onlineddl_test (id, info, data) VALUES (X'66ffffffffffffffffffffffffffff00', 'item 6', X'66ffffffffffffff'); + INSERT INTO onlineddl_test (id, info, data) VALUES (X'77ffffffffffffffffffffffffffffff', 'item 7', X'77ffffffffffff00'); + INSERT INTO onlineddl_test (id, info, data) VALUES (X'88ffffffffffffffffffffffffffffff', 'item 8', X'88ffffffffffffff'); +end ;; + +INSERT INTO onlineddl_test (id, info, data) VALUES + (X'11ffffffffffffffffffffffffffff00', 'item 1', X'11ffffffffffffff'), -- id ends in 00 + (X'22ffffffffffffffffffffffffffff00', 'item 2', X'22ffffffffffffff'), -- id ends in 00 + (X'33ffffffffffffffffffffffffffffff', 'item 3', X'33ffffffffffffff'), + (X'44ffffffffffffffffffffffffffffff', 'item 4', X'44ffffffffffffff'), + (X'55ffffffffffffffffffffffffffffff', 'item 5', X'55ffffffffffffff'), + (X'99ffffffffffffffffffffffffffffff', 'item 9', X'99ffffffffffff00'); -- data ends in 00 diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/datetime-to-timestamp-pk-fail/__expect_failure b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/datetime-to-timestamp-pk-fail/__expect_failure new file mode 100644 index 00000000000..98ddf4a0e9f --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/datetime-to-timestamp-pk-fail/__expect_failure @@ -0,0 +1 @@ +No support at this time for converting a column from DATETIME to TIMESTAMP that is also part of the chosen unique key diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/modify-change-case-pk/alter 
b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/modify-change-case-pk/alter new file mode 100644 index 00000000000..066298972f8 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/modify-change-case-pk/alter @@ -0,0 +1 @@ +modify ID int diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/modify-change-case-pk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/modify-change-case-pk/create.sql new file mode 100644 index 00000000000..ffb6339d707 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/modify-change-case-pk/create.sql @@ -0,0 +1,28 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id int auto_increment, + c1 int not null default 0, + c2 int not null default 0, + primary key (id) +) auto_increment=1; + +insert into onlineddl_test values (97, 7, 23); + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert ignore into onlineddl_test values (1, 11, 23); + insert ignore into onlineddl_test values (2, 13, 23); + insert into onlineddl_test values (null, 17, 23); + set @last_insert_id := last_insert_id(); + update onlineddl_test set c1=c1+@last_insert_id, c2=c2+@last_insert_id where id=@last_insert_id order by id desc limit 1; + delete from onlineddl_test where id=1; + delete from onlineddl_test where c1=13; -- id=2 +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/alter b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/alter new file mode 100644 index 00000000000..faa45581aff --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/alter @@ -0,0 +1 @@ +drop primary key, drop key its_uidx, add primary key (i, ts), add unique key id_uidx(id) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/create.sql 
b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/create.sql new file mode 100644 index 00000000000..14efb2f7c51 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id bigint, + i int not null, + ts timestamp(6), + primary key(id), + unique key its_uidx(i, ts) +) ; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values ((unix_timestamp() << 2) + 0, 11, now(6)); + insert into onlineddl_test values ((unix_timestamp() << 2) + 1, 13, now(6)); + insert into onlineddl_test values ((unix_timestamp() << 2) + 2, 17, now(6)); + insert into onlineddl_test values ((unix_timestamp() << 2) + 3, 19, now(6)); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/order_by b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/order_by new file mode 100644 index 00000000000..074d1eeb404 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-pk-uk/order_by @@ -0,0 +1 @@ +id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/alter b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/alter new file mode 100644 index 00000000000..6286a960e61 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/alter @@ -0,0 +1 @@ +drop key id_uidx, drop key its_uidx, add unique key its2_uidx(i, ts), add 
unique key id2_uidx(id) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/create.sql b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/create.sql new file mode 100644 index 00000000000..cc22a6ddb77 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/create.sql @@ -0,0 +1,24 @@ +drop table if exists onlineddl_test; +create table onlineddl_test ( + id bigint not null, + i int not null, + ts timestamp(6) not null, + unique key id_uidx(id), + unique key its_uidx(i, ts) +) ; + +drop event if exists onlineddl_test; +delimiter ;; +create event onlineddl_test + on schedule every 1 second + starts current_timestamp + ends current_timestamp + interval 60 second + on completion not preserve + enable + do +begin + insert into onlineddl_test values ((unix_timestamp() << 2) + 0, 11, now(6)); + insert into onlineddl_test values ((unix_timestamp() << 2) + 1, 13, now(6)); + insert into onlineddl_test values ((unix_timestamp() << 2) + 2, 17, now(6)); + insert into onlineddl_test values ((unix_timestamp() << 2) + 3, 19, now(6)); +end ;; diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/ignore_versions b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/ignore_versions new file mode 100644 index 00000000000..7acd3f06f32 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/ignore_versions @@ -0,0 +1 @@ +(5.5) diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/order_by b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/order_by new file mode 100644 index 00000000000..074d1eeb404 --- /dev/null +++ b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/swap-uk-uk/order_by @@ -0,0 +1 @@ +id diff --git a/go/test/endtoend/onlineddl/vrepl_suite/untestdata/test.sh b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/test.sh new file mode 100755 index 00000000000..59ea60ac844 --- /dev/null +++ 
b/go/test/endtoend/onlineddl/vrepl_suite/untestdata/test.sh @@ -0,0 +1,270 @@ +#!/bin/bash + +# Local integration tests. To be used by CI. +# See https://github.com/github/gh-ost/tree/doc/local-tests.md +# + +# Usage: localtests/test/sh [filter] +# By default, runs all tests. Given filter, will only run tests matching given regep + +tests_path=$(dirname $0) +test_logfile=/tmp/gh-ost-test.log +default_ghost_binary=/tmp/gh-ost-test +ghost_binary="" +exec_command_file=/tmp/gh-ost-test.bash +ghost_structure_output_file=/tmp/gh-ost-test.ghost.structure.sql +orig_content_output_file=/tmp/gh-ost-test.orig.content.csv +ghost_content_output_file=/tmp/gh-ost-test.ghost.content.csv +throttle_flag_file=/tmp/gh-ost-test.ghost.throttle.flag + +master_host= +master_port= +replica_host= +replica_port= +original_sql_mode= + +OPTIND=1 +while getopts "b:" OPTION +do + case $OPTION in + b) + ghost_binary="$OPTARG" + ;; + esac +done +shift $((OPTIND-1)) + +test_pattern="${1:-.}" + +verify_master_and_replica() { + if [ "$(gh-ost-test-mysql-master -e "select 1" -ss)" != "1" ] ; then + echo "Cannot verify gh-ost-test-mysql-master" + exit 1 + fi + read master_host master_port <<< $(gh-ost-test-mysql-master -e "select @@hostname, @@port" -ss) + [ "$master_host" == "$(hostname)" ] && master_host="127.0.0.1" + echo "# master verified at $master_host:$master_port" + if ! gh-ost-test-mysql-master -e "set global event_scheduler := 1" ; then + echo "Cannot enable event_scheduler on master" + exit 1 + fi + original_sql_mode="$(gh-ost-test-mysql-master -e "select @@global.sql_mode" -s -s)" + echo "sql_mode on master is ${original_sql_mode}" + + echo "Gracefully sleeping for 3 seconds while replica is setting up..." 
+ sleep 3 + + if [ "$(gh-ost-test-mysql-replica -e "select 1" -ss)" != "1" ] ; then + echo "Cannot verify gh-ost-test-mysql-replica" + exit 1 + fi + if [ "$(gh-ost-test-mysql-replica -e "select @@global.binlog_format" -ss)" != "ROW" ] ; then + echo "Expecting test replica to have binlog_format=ROW" + exit 1 + fi + read replica_host replica_port <<< $(gh-ost-test-mysql-replica -e "select @@hostname, @@port" -ss) + [ "$replica_host" == "$(hostname)" ] && replica_host="127.0.0.1" + echo "# replica verified at $replica_host:$replica_port" +} + +exec_cmd() { + echo "$@" + command "$@" 1> $test_logfile 2>&1 + return $? +} + +echo_dot() { + echo -n "." +} + +start_replication() { + gh-ost-test-mysql-replica -e "stop slave; start slave;" + num_attempts=0 + while gh-ost-test-mysql-replica -e "show slave status\G" | grep Seconds_Behind_Master | grep -q NULL ; do + ((num_attempts=num_attempts+1)) + if [ $num_attempts -gt 10 ] ; then + echo + echo "ERROR replication failure" + exit 1 + fi + echo_dot + sleep 1 + done +} + +test_single() { + local test_name + test_name="$1" + + if [ -f $tests_path/$test_name/ignore_versions ] ; then + ignore_versions=$(cat $tests_path/$test_name/ignore_versions) + mysql_version=$(gh-ost-test-mysql-master -s -s -e "select @@version") + if echo "$mysql_version" | egrep -q "^${ignore_versions}" ; then + echo -n "Skipping: $test_name" + return 0 + fi + fi + + echo -n "Testing: $test_name" + + echo_dot + start_replication + echo_dot + + if [ -f $tests_path/$test_name/sql_mode ] ; then + gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'" + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='$(cat $tests_path/$test_name/sql_mode)'" + fi + + gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/create.sql + + extra_args="" + if [ -f $tests_path/$test_name/extra_args ] ; then + extra_args=$(cat 
$tests_path/$test_name/extra_args) + fi + orig_columns="*" + ghost_columns="*" + order_by="" + if [ -f $tests_path/$test_name/orig_columns ] ; then + orig_columns=$(cat $tests_path/$test_name/orig_columns) + fi + if [ -f $tests_path/$test_name/ghost_columns ] ; then + ghost_columns=$(cat $tests_path/$test_name/ghost_columns) + fi + if [ -f $tests_path/$test_name/order_by ] ; then + order_by="order by $(cat $tests_path/$test_name/order_by)" + fi + # graceful sleep for replica to catch up + echo_dot + sleep 1 + # + cmd="$ghost_binary \ + --user=gh-ost \ + --password=gh-ost \ + --host=$replica_host \ + --port=$replica_port \ + --assume-master-host=${master_host}:${master_port} + --database=test \ + --table=onlineddl_test \ + --alter='engine=innodb' \ + --exact-rowcount \ + --assume-rbr \ + --initially-drop-old-table \ + --initially-drop-ghost-table \ + --throttle-query='select timestampdiff(second, min(last_update), now()) < 5 from _gh_ost_test_ghc' \ + --throttle-flag-file=$throttle_flag_file \ + --serve-socket-file=/tmp/gh-ost.test.sock \ + --initially-drop-socket-file \ + --test-on-replica \ + --default-retries=3 \ + --chunk-size=10 \ + --verbose \ + --debug \ + --stack \ + --execute ${extra_args[@]}" + echo_dot + echo $cmd > $exec_command_file + echo_dot + bash $exec_command_file 1> $test_logfile 2>&1 + + execution_result=$? + + if [ -f $tests_path/$test_name/sql_mode ] ; then + gh-ost-test-mysql-master --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'" + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "set @@global.sql_mode='${original_sql_mode}'" + fi + + if [ -f $tests_path/$test_name/destroy.sql ] ; then + gh-ost-test-mysql-master --default-character-set=utf8mb4 test < $tests_path/$test_name/destroy.sql + fi + + if [ -f $tests_path/$test_name/expect_failure ] ; then + if [ $execution_result -eq 0 ] ; then + echo + echo "ERROR $test_name execution was expected to exit on error but did not. 
cat $test_logfile" + return 1 + fi + if [ -s $tests_path/$test_name/expect_failure ] ; then + # 'expect_failure' file has content. We expect to find this content in the log. + expected_error_message="$(cat $tests_path/$test_name/expect_failure)" + if grep -q "$expected_error_message" $test_logfile ; then + return 0 + fi + echo + echo "ERROR $test_name execution was expected to exit with error message '${expected_error_message}' but did not. cat $test_logfile" + return 1 + fi + # 'expect_failure' file has no content. We generally agree that the failure is correct + return 0 + fi + + if [ $execution_result -ne 0 ] ; then + echo + echo "ERROR $test_name execution failure. cat $test_logfile:" + cat $test_logfile + return 1 + fi + + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "show create table _gh_ost_test_gho\G" -ss > $ghost_structure_output_file + + if [ -f $tests_path/$test_name/expect_table_structure ] ; then + expected_table_structure="$(cat $tests_path/$test_name/expect_table_structure)" + if ! grep -q "$expected_table_structure" $ghost_structure_output_file ; then + echo + echo "ERROR $test_name: table structure was expected to include ${expected_table_structure} but did not. 
cat $ghost_structure_output_file:" + cat $ghost_structure_output_file + return 1 + fi + fi + + echo_dot + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${orig_columns} from onlineddl_test ${order_by}" -ss > $orig_content_output_file + gh-ost-test-mysql-replica --default-character-set=utf8mb4 test -e "select ${ghost_columns} from _gh_ost_test_gho ${order_by}" -ss > $ghost_content_output_file + orig_checksum=$(cat $orig_content_output_file | md5sum) + ghost_checksum=$(cat $ghost_content_output_file | md5sum) + + if [ "$orig_checksum" != "$ghost_checksum" ] ; then + echo "ERROR $test_name: checksum mismatch" + echo "---" + diff $orig_content_output_file $ghost_content_output_file + + echo "diff $orig_content_output_file $ghost_content_output_file" + + return 1 + fi +} + +build_binary() { + echo "Building" + rm -f $default_ghost_binary + [ "$ghost_binary" == "" ] && ghost_binary="$default_ghost_binary" + if [ -f "$ghost_binary" ] ; then + echo "Using binary: $ghost_binary" + return 0 + fi + go build -o $ghost_binary go/cmd/gh-ost/main.go + if [ $? -ne 0 ] ; then + echo "Build failure" + exit 1 + fi +} + +test_all() { + build_binary + find $tests_path ! -path . -type d -mindepth 1 -maxdepth 1 | cut -d "/" -f 3 | egrep "$test_pattern" | while read test_name ; do + test_single "$test_name" + if [ $? 
-ne 0 ] ; then + create_statement=$(gh-ost-test-mysql-replica test -t -e "show create table _gh_ost_test_gho \G") + echo "$create_statement" >> $test_logfile + echo "+ FAIL" + return 1 + else + echo + echo "+ pass" + fi + gh-ost-test-mysql-replica -e "start slave" + done +} + +verify_master_and_replica +test_all diff --git a/go/test/endtoend/onlineddl/vtgate_util.go b/go/test/endtoend/onlineddl/vtgate_util.go index 833f155ec8f..65ebedf2748 100644 --- a/go/test/endtoend/onlineddl/vtgate_util.go +++ b/go/test/endtoend/onlineddl/vtgate_util.go @@ -21,10 +21,12 @@ import ( "fmt" "os" "testing" + "time" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/vt/schema" + "vitess.io/vitess/go/vt/sqlparser" "vitess.io/vitess/go/test/endtoend/cluster" @@ -76,8 +78,11 @@ func VtgateExecDDL(t *testing.T, vtParams *mysql.ConnParams, ddlStrategy string, // CheckRetryMigration attempts to retry a migration, and expects success/failure by counting affected rows func CheckRetryMigration(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectRetryPossible bool) { - retryQuery := fmt.Sprintf("alter vitess_migration '%s' retry", uuid) - r := VtgateExecQuery(t, vtParams, retryQuery, "") + query, err := sqlparser.ParseAndBind("alter vitess_migration %a retry", + sqltypes.StringBindVariable(uuid), + ) + require.NoError(t, err) + r := VtgateExecQuery(t, vtParams, query, "") if expectRetryPossible { assert.Equal(t, len(shards), int(r.RowsAffected)) @@ -88,8 +93,11 @@ func CheckRetryMigration(t *testing.T, vtParams *mysql.ConnParams, shards []clus // CheckCancelMigration attempts to cancel a migration, and expects success/failure by counting affected rows func CheckCancelMigration(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectCancelPossible bool) { - cancelQuery := fmt.Sprintf("alter vitess_migration '%s' cancel", uuid) - r := VtgateExecQuery(t, vtParams, cancelQuery, "") + query, err := 
sqlparser.ParseAndBind("alter vitess_migration %a cancel", + sqltypes.StringBindVariable(uuid), + ) + require.NoError(t, err) + r := VtgateExecQuery(t, vtParams, query, "") if expectCancelPossible { assert.Equal(t, len(shards), int(r.RowsAffected)) @@ -107,25 +115,64 @@ func CheckCancelAllMigrations(t *testing.T, vtParams *mysql.ConnParams, expectCo } // CheckMigrationStatus verifies that the migration indicated by given UUID has the given expected status -func CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectStatus schema.OnlineDDLStatus) { - showQuery := fmt.Sprintf("show vitess_migrations like '%s'", uuid) - r := VtgateExecQuery(t, vtParams, showQuery, "") - fmt.Printf("# output for `%s`:\n", showQuery) +func CheckMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectStatuses ...schema.OnlineDDLStatus) { + query, err := sqlparser.ParseAndBind("show vitess_migrations like %a", + sqltypes.StringBindVariable(uuid), + ) + require.NoError(t, err) + + r := VtgateExecQuery(t, vtParams, query, "") + fmt.Printf("# output for `%s`:\n", query) PrintQueryResult(os.Stdout, r) count := 0 for _, row := range r.Named().Rows { - if row["migration_uuid"].ToString() == uuid && row["migration_status"].ToString() == string(expectStatus) { - count++ + if row["migration_uuid"].ToString() != uuid { + continue + } + for _, expectStatus := range expectStatuses { + if row["migration_status"].ToString() == string(expectStatus) { + count++ + break + } } } assert.Equal(t, len(shards), count) } +// WaitForMigrationStatus waits for a migration to reach either provided statuses (returns immediately), or eventually time out +func WaitForMigrationStatus(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, timeout time.Duration, expectStatuses ...schema.OnlineDDLStatus) schema.OnlineDDLStatus { + query, err := sqlparser.ParseAndBind("show vitess_migrations like %a", + 
sqltypes.StringBindVariable(uuid), + ) + require.NoError(t, err) + + statusesMap := map[string]bool{} + for _, status := range expectStatuses { + statusesMap[string(status)] = true + } + startTime := time.Now() + lastKnownStatus := "" + for time.Since(startTime) < timeout { + countMatchedShards := 0 + r := VtgateExecQuery(t, vtParams, query, "") + for _, row := range r.Named().Rows { + lastKnownStatus = row["migration_status"].ToString() + if row["migration_uuid"].ToString() == uuid && statusesMap[lastKnownStatus] { + countMatchedShards++ + } + } + if countMatchedShards == len(shards) { + return schema.OnlineDDLStatus(lastKnownStatus) + } + time.Sleep(1 * time.Second) + } + return schema.OnlineDDLStatus(lastKnownStatus) +} + // CheckMigrationArtifacts verifies given migration exists, and checks if it has artifacts func CheckMigrationArtifacts(t *testing.T, vtParams *mysql.ConnParams, shards []cluster.Shard, uuid string, expectArtifacts bool) { - showQuery := fmt.Sprintf("show vitess_migrations like '%s'", uuid) - r := VtgateExecQuery(t, vtParams, showQuery, "") + r := ReadMigrations(t, vtParams, uuid) assert.Equal(t, len(shards), len(r.Named().Rows)) for _, row := range r.Named().Rows { @@ -133,3 +180,28 @@ func CheckMigrationArtifacts(t *testing.T, vtParams *mysql.ConnParams, shards [] assert.Equal(t, expectArtifacts, hasArtifacts) } } + +// ReadMigrations reads migration entries +func ReadMigrations(t *testing.T, vtParams *mysql.ConnParams, like string) *sqltypes.Result { + query, err := sqlparser.ParseAndBind("show vitess_migrations like %a", + sqltypes.StringBindVariable(like), + ) + require.NoError(t, err) + + return VtgateExecQuery(t, vtParams, query, "") +} + +// ReadMigrationLogs reads migration logs for a given migration, on all shards +func ReadMigrationLogs(t *testing.T, vtParams *mysql.ConnParams, uuid string) (logs []string) { + query, err := sqlparser.ParseAndBind("show vitess_migration %a logs", + sqltypes.StringBindVariable(uuid), + ) + 
require.NoError(t, err) + + r := VtgateExecQuery(t, vtParams, query, "") + for _, row := range r.Named().Rows { + migrationLog := row["migration_log"].ToString() + logs = append(logs, migrationLog) + } + return logs +} diff --git a/go/test/endtoend/recovery/pitr/shardedpitr_test.go b/go/test/endtoend/recovery/pitr/shardedpitr_test.go index c7c89630651..52c38c7a6a4 100644 --- a/go/test/endtoend/recovery/pitr/shardedpitr_test.go +++ b/go/test/endtoend/recovery/pitr/shardedpitr_test.go @@ -299,7 +299,7 @@ func performResharding(t *testing.T) { require.NoError(t, err) } - err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "ks.reshardWorkflow", "0", "-80,80-") + err = clusterInstance.VtctlclientProcess.ExecuteCommand("Reshard", "-v1", "ks.reshardWorkflow", "0", "-80,80-") require.NoError(t, err) err = clusterInstance.VtctlclientProcess.ExecuteCommand("SwitchReads", "-tablet_type=rdonly", "ks.reshardWorkflow") diff --git a/go/test/endtoend/reparent/utils_test.go b/go/test/endtoend/reparent/utils_test.go index fa97ebef4f8..44bf8ec918f 100644 --- a/go/test/endtoend/reparent/utils_test.go +++ b/go/test/endtoend/reparent/utils_test.go @@ -116,6 +116,7 @@ func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []s clusterInstance.VtTabletExtraArgs = []string{ "-lock_tables_timeout", "5s", "-enable_semi_sync", + "-init_populate_metadata", "-track_schema_versions=true", } diff --git a/go/test/endtoend/sharding/mergesharding/mergesharding_base.go b/go/test/endtoend/sharding/mergesharding/mergesharding_base.go index ed20617156e..7263e2cca42 100644 --- a/go/test/endtoend/sharding/mergesharding/mergesharding_base.go +++ b/go/test/endtoend/sharding/mergesharding/mergesharding_base.go @@ -27,14 +27,11 @@ import ( "testing" "time" - "vitess.io/vitess/go/sqltypes" - "vitess.io/vitess/go/mysql" - - "github.com/prometheus/common/log" - + "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" 
"vitess.io/vitess/go/test/endtoend/sharding" + "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/topodata" @@ -340,8 +337,8 @@ func TestMergesharding(t *testing.T, useVarbinaryShardingKeyType bool) { require.NoError(t, err) assert.Equal(t, 2, len(qr.Rows)) assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "SplitClone") - assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"-40\" key_range: "`) - assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"40-80\" key_range: "`) + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"-40\" key_range:{end:\"\\x80\"}"`) + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"40-80\" key_range:{end:\"\\x80\"}"`) // check the binlog players are running and exporting vars sharding.CheckDestinationMaster(t, *shard3Master, []string{shard1Ks, shard0Ks}, *clusterInstance) @@ -360,7 +357,7 @@ func TestMergesharding(t *testing.T, useVarbinaryShardingKeyType bool) { // testing filtered replication: insert a bunch of data on shard 1, check we get most of it after a few seconds, // wait for binlog server timeout, check we get all of it. 
- log.Debug("Inserting lots of data on source shard") + log.Info("Inserting lots of data on source shard") insertLots(t, 100, 0, tableName, fixedParentID, keyspaceName) //Checking 100 percent of data is sent quickly @@ -380,7 +377,7 @@ func TestMergesharding(t *testing.T, useVarbinaryShardingKeyType bool) { clusterInstance.VtworkerProcess.Cell = cell // Compare using SplitDiff - log.Debug("Running vtworker SplitDiff") + log.Info("Running vtworker SplitDiff") err = clusterInstance.VtworkerProcess.ExecuteVtworkerCommand(clusterInstance.GetAndReservePort(), clusterInstance.GetAndReservePort(), "--use_v3_resharding_mode=true", @@ -396,7 +393,7 @@ func TestMergesharding(t *testing.T, useVarbinaryShardingKeyType bool) { err = clusterInstance.VtctlclientProcess.ExecuteCommand("ChangeTabletType", shard3Rdonly.Alias, "rdonly") require.NoError(t, err) - log.Debug("Running vtworker SplitDiff on second half") + log.Info("Running vtworker SplitDiff on second half") err = clusterInstance.VtworkerProcess.ExecuteVtworkerCommand(clusterInstance.GetAndReservePort(), clusterInstance.GetAndReservePort(), @@ -418,7 +415,7 @@ func TestMergesharding(t *testing.T, useVarbinaryShardingKeyType bool) { "VtTabletStreamHealth", "-count", "1", shard3Master.Alias) require.NoError(t, err) - log.Debug("Got health: ", streamHealth) + log.Info("Got health: ", streamHealth) var streamHealthResponse querypb.StreamHealthResponse err = json.Unmarshal([]byte(streamHealth), &streamHealthResponse) diff --git a/go/test/endtoend/sharding/resharding/resharding_base.go b/go/test/endtoend/sharding/resharding/resharding_base.go index d04e0834a79..fabc1a0734b 100644 --- a/go/test/endtoend/sharding/resharding/resharding_base.go +++ b/go/test/endtoend/sharding/resharding/resharding_base.go @@ -505,13 +505,13 @@ func TestResharding(t *testing.T, useVarbinaryShardingKeyType bool) { require.Nil(t, err) assert.Equal(t, 1, len(qr.Rows)) assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "SplitClone") - assert.Contains(t, 
fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"80-\" key_range: "`) + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"80-\" key_range:{start:\"\\x80\" end:\"\\xc0\"}"`) qr, err = shard3.MasterTablet().VttabletProcess.QueryTabletWithDB("select * from vreplication", "_vt") require.Nil(t, err) assert.Equal(t, 1, len(qr.Rows)) assert.Contains(t, fmt.Sprintf("%v", qr.Rows), "SplitClone") - assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"80-\" key_range: "`) + assert.Contains(t, fmt.Sprintf("%v", qr.Rows), `"keyspace:\"ks\" shard:\"80-\" key_range:{start:\"\\xc0\"}"`) // check the binlog players are running and exporting vars sharding.CheckDestinationMaster(t, *shard2Master, []string{shard1Ks}, *clusterInstance) diff --git a/go/test/endtoend/sharding/verticalsplit/vertical_split_test.go b/go/test/endtoend/sharding/verticalsplit/vertical_split_test.go index 676f181a26b..ad1c73ccaa1 100644 --- a/go/test/endtoend/sharding/verticalsplit/vertical_split_test.go +++ b/go/test/endtoend/sharding/verticalsplit/vertical_split_test.go @@ -708,7 +708,7 @@ func validateKeyspaceJSON(t *testing.T, keyspaceJSON string, cellsArr []string) assert.Contains(t, strings.Join(servedFrom.GetCells(), " "), eachCell) } } else { - assert.Equal(t, []string{}, servedFrom.GetCells()) + assert.Empty(t, servedFrom.GetCells()) } } } diff --git a/go/test/endtoend/tabletgateway/buffer/buffer_test.go b/go/test/endtoend/tabletgateway/buffer/buffer_test.go index 4c063f43737..d50bec6fe10 100644 --- a/go/test/endtoend/tabletgateway/buffer/buffer_test.go +++ b/go/test/endtoend/tabletgateway/buffer/buffer_test.go @@ -35,7 +35,6 @@ import ( "io/ioutil" "math/rand" "net/http" - "os" "reflect" "strconv" "strings" @@ -79,7 +78,6 @@ const ( //threadParams is set of params passed into read and write threads type threadParams struct { - writable bool quit bool rpcs int // Number of queries successfully executed. errors int // Number of failed queries. 
@@ -87,24 +85,32 @@ type threadParams struct { notifyLock sync.Mutex // notifyLock guards the two fields notifyAfterNSuccessfulRpcs/rpcsSoFar. notifyAfterNSuccessfulRpcs int // If 0, notifications are disabled rpcsSoFar int // Number of RPCs at the time a notification was requested - i int // - commitErrors int + index int // + internalErrs int executeFunction func(c *threadParams, conn *mysql.Conn) error // Implement the method for read/update. + typ string + reservedConn bool } // Thread which constantly executes a query on vtgate. func (c *threadParams) threadRun() { - ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) if err != nil { log.Errorf("error connecting to mysql with params %v: %v", vtParams, err) } defer conn.Close() + if c.reservedConn { + _, err = conn.ExecuteFetch("set default_week_format = 1", 1000, true) + if err != nil { + c.errors++ + log.Errorf("error setting default_week_format: %v", err) + } + } for !c.quit { err = c.executeFunction(c, conn) if err != nil { c.errors++ - log.Errorf("error executing function %v: %v", c.executeFunction, err) + log.Errorf("error executing function %s: %v", c.typ, err) } c.rpcs++ // If notifications are requested, check if we already executed the @@ -135,14 +141,34 @@ func (c *threadParams) stop() { } func readExecute(c *threadParams, conn *mysql.Conn) error { - _, err := conn.ExecuteFetch(fmt.Sprintf("SELECT * FROM buffer WHERE id = %d", criticalReadRowID), 1000, true) - return err + attempt := c.index + c.index++ + qr, err := conn.ExecuteFetch(fmt.Sprintf("SELECT * FROM buffer WHERE id = %d", criticalReadRowID), 1000, true) + + if err != nil { + log.Errorf("select attempt #%d, failed with err: %v", attempt, err) + // For a reserved connection, read query can fail as it does not go through the gateway and + // goes to tablet directly and later is directed to use Gateway if the error is caused due to cluster failover operation. 
+ if c.reservedConn { + c.internalErrs++ + if c.internalErrs > 1 { + log.Errorf("More Read Errors: %d", c.internalErrs) + return err + } + log.Error("This is okay once because we do not support buffering it.") + return nil + } + return err + } + + log.Infof("select attempt #%d, rows: %d", attempt, len(qr.Rows)) + return nil } func updateExecute(c *threadParams, conn *mysql.Conn) error { - attempt := c.i + attempt := c.index // Value used in next UPDATE query. Increased after every query. - c.i++ + c.index++ conn.ExecuteFetch("begin", 1000, true) result, err := conn.ExecuteFetch(fmt.Sprintf("UPDATE buffer SET msg='update %d' WHERE id = %d", attempt, updateRowID), 1000, true) @@ -156,28 +182,31 @@ func updateExecute(c *threadParams, conn *mysql.Conn) error { log.Infof("update attempt #%d affected %v rows", attempt, result.RowsAffected) _, err = conn.ExecuteFetch("commit", 1000, true) if err != nil { + log.Errorf("UPDATE #%d failed during COMMIT, err: %v", attempt, err) _, errRollback := conn.ExecuteFetch("rollback", 1000, true) if errRollback != nil { - log.Errorf("Error in rollback: %v", errRollback) + log.Errorf("Error in rollback #%d: %v", attempt, errRollback) } - c.commitErrors++ - if c.commitErrors > 1 { + c.internalErrs++ + if c.internalErrs > 1 { + log.Errorf("More Commit Errors: %d", c.internalErrs) return err } - log.Errorf("UPDATE %d failed during ROLLBACK. This is okay once because we do not support buffering it. 
err: %v", attempt, err) + log.Error("This is okay once because we do not support buffering it.") } + return nil } - if err != nil { - _, errRollback := conn.ExecuteFetch("rollback", 1000, true) - if errRollback != nil { - log.Errorf("Error in rollback: %v", errRollback) - } - c.commitErrors++ - if c.commitErrors > 1 { - return err - } - log.Errorf("UPDATE %d failed during COMMIT with err: %v.This is okay once because we do not support buffering it.", attempt, err) + log.Errorf("UPDATE #%d failed with err: %v", attempt, err) + _, errRollback := conn.ExecuteFetch("rollback", 1000, true) + if errRollback != nil { + log.Errorf("Error in rollback #%d: %v", attempt, errRollback) } + c.internalErrs++ + if c.internalErrs > 1 { + log.Errorf("More Rollback Errors: %d", c.internalErrs) + return err + } + log.Error("This is okay once because we do not support buffering it.") return nil } @@ -185,6 +214,7 @@ func createCluster() (*cluster.LocalProcessCluster, int) { clusterInstance = cluster.NewCluster(cell, hostname) // Start topo server + clusterInstance.VtctldExtraArgs = []string{"-remote_operation_timeout", "30s", "-topo_etcd_lease_ttl", "40"} if err := clusterInstance.StartTopo(); err != nil { return nil, 1 } @@ -194,8 +224,9 @@ func createCluster() (*cluster.LocalProcessCluster, int) { Name: keyspaceUnshardedName, SchemaSQL: sqlSchema, } - clusterInstance.VtTabletExtraArgs = []string{"-health_check_interval", "1s"} - + clusterInstance.VtTabletExtraArgs = []string{"-health_check_interval", "1s", + "-queryserver-config-transaction-timeout", "20", + } if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { return nil, 1 } @@ -227,23 +258,33 @@ func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { return qr } -func TestBufferInternalReparenting(t *testing.T) { - testBufferBase(t, false) +func TestBufferReparenting(t *testing.T) { + t.Run("TER without reserved connection", func(t *testing.T) { + testBufferBase(t, true, false) + 
}) + t.Run("TER with reserved connection", func(t *testing.T) { + testBufferBase(t, true, true) + }) + t.Run("PRS without reserved connections", func(t *testing.T) { + testBufferBase(t, false, false) + }) + t.Run("PRS with reserved connections", func(t *testing.T) { + testBufferBase(t, false, true) + }) } -func TestBufferExternalReparenting(t *testing.T) { - testBufferBase(t, true) -} +var ctx = context.Background() -func testBufferBase(t *testing.T, isExternalParent bool) { +func testBufferBase(t *testing.T, isExternalParent bool, useReservedConn bool) { defer cluster.PanicHandler(t) clusterInstance, exitCode := createCluster() if exitCode != 0 { - os.Exit(exitCode) + t.Fatal("failed to start cluster") } + defer clusterInstance.Teardown() + // Healthcheck interval on tablet is set to 1s, so sleep for 2s time.Sleep(2 * time.Second) - ctx := context.Background() conn, err := mysql.Connect(ctx, &vtParams) require.Nil(t, err) defer conn.Close() @@ -253,10 +294,22 @@ func testBufferBase(t *testing.T, isExternalParent bool) { exec(t, conn, fmt.Sprintf("INSERT INTO buffer (id, msg) VALUES (%d, %s)", updateRowID, "'update'")) //Start both threads. 
- readThreadInstance := &threadParams{writable: false, quit: false, rpcs: 0, errors: 0, notifyAfterNSuccessfulRpcs: 0, rpcsSoFar: 0, executeFunction: readExecute, waitForNotification: make(chan bool)} + readThreadInstance := &threadParams{ + index: 1, + typ: "read", + executeFunction: readExecute, + waitForNotification: make(chan bool), + reservedConn: useReservedConn, + } wg.Add(1) go readThreadInstance.threadRun() - updateThreadInstance := &threadParams{writable: false, quit: false, rpcs: 0, errors: 0, notifyAfterNSuccessfulRpcs: 0, rpcsSoFar: 0, executeFunction: updateExecute, i: 1, commitErrors: 0, waitForNotification: make(chan bool)} + updateThreadInstance := &threadParams{ + index: 1, + typ: "write", + executeFunction: updateExecute, + waitForNotification: make(chan bool), + reservedConn: useReservedConn, + } wg.Add(1) go updateThreadInstance.threadRun() @@ -272,24 +325,36 @@ func testBufferBase(t *testing.T, isExternalParent bool) { updateThreadInstance.setNotifyAfterNSuccessfulRpcs(10) if isExternalParent { - externalReparenting(ctx, t, clusterInstance) + err := externalReparenting(t, clusterInstance) + require.NoError(t, err) } else { //reparent call - clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "-keyspace_shard", + err := clusterInstance.VtctlclientProcess.ExecuteCommand("PlannedReparentShard", "-keyspace_shard", fmt.Sprintf("%s/%s", keyspaceUnshardedName, "0"), "-new_master", clusterInstance.Keyspaces[0].Shards[0].Vttablets[1].Alias) + require.NoError(t, err) } - <-readThreadInstance.waitForNotification - <-updateThreadInstance.waitForNotification + timeout := time.After(40 * time.Second) + select { + case <-readThreadInstance.waitForNotification: + case <-timeout: + timeout = time.After(100 * time.Millisecond) + log.Error("failed to get read thread notification") + } + select { + case <-updateThreadInstance.waitForNotification: + case <-timeout: + log.Error("failed to get update thread notification") + } // Stop threads 
readThreadInstance.stop() updateThreadInstance.stop() // Both threads must not see any error - assert.Equal(t, 0, readThreadInstance.errors) - assert.Equal(t, 0, updateThreadInstance.errors) + assert.Zero(t, readThreadInstance.errors, "found errors in read queries") + assert.Zero(t, updateThreadInstance.errors, "found errors in tx queries") //At least one thread should have been buffered. //This may fail if a failover is too fast. Add retries then. @@ -315,7 +380,7 @@ func testBufferBase(t *testing.T, isExternalParent bool) { if inFlightMax == 0 { // Missed buffering is okay when we observed the failover during the // COMMIT (which cannot trigger the buffering). - assert.Greater(t, updateThreadInstance.commitErrors, 0, "No buffering took place and the update thread saw no error during COMMIT. But one of it must happen.") + assert.Greater(t, updateThreadInstance.internalErrs, 0, "No buffering took place and the update thread saw no error during COMMIT. But one of it must happen.") } else { assert.Greater(t, inFlightMax, 0) } @@ -330,7 +395,6 @@ func testBufferBase(t *testing.T, isExternalParent bool) { assert.Equal(t, masterPromotedCount, bufferingStops) } wg.Wait() - clusterInstance.Teardown() } func getVarFromVtgate(t *testing.T, label string, param string, resultMap map[string]interface{}) int { @@ -350,7 +414,7 @@ func getVarFromVtgate(t *testing.T, label string, param string, resultMap map[st return paramVal } -func externalReparenting(ctx context.Context, t *testing.T, clusterInstance *cluster.LocalProcessCluster) { +func externalReparenting(t *testing.T, clusterInstance *cluster.LocalProcessCluster) error { start := time.Now() // Demote master Query @@ -391,5 +455,5 @@ func externalReparenting(ctx context.Context, t *testing.T, clusterInstance *clu oldMaster.VttabletProcess.QueryTablet(changeMasterCommands, keyspaceUnshardedName, true) // Notify the new vttablet master about the reparent. 
- clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", newMaster.Alias) + return clusterInstance.VtctlclientProcess.ExecuteCommand("TabletExternallyReparented", newMaster.Alias) } diff --git a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go index 1bebe71a803..44933e32334 100644 --- a/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go +++ b/go/test/endtoend/tabletmanager/tablegc/tablegc_test.go @@ -88,6 +88,7 @@ func TestMain(m *testing.M) { "-heartbeat_enable", "-heartbeat_interval", "250ms", "-gc_check_interval", "5s", + "-gc_purge_check_interval", "5s", "-table_gc_lifecycle", "hold,purge,evac,drop", } // We do not need semiSync for this test case. @@ -124,21 +125,21 @@ func checkTableRows(t *testing.T, tableName string, expect int64) { query := `select count(*) as c from %a` parsed := sqlparser.BuildParsedQuery(query, tableName) rs, err := masterTablet.VttabletProcess.QueryTablet(parsed.Query, keyspaceName, true) - assert.NoError(t, err) + require.NoError(t, err) count := rs.Named().Row().AsInt64("c", 0) assert.Equal(t, expect, count) } func populateTable(t *testing.T) { _, err := masterTablet.VttabletProcess.QueryTablet(sqlSchema, keyspaceName, true) - assert.NoError(t, err) + require.NoError(t, err) _, err = masterTablet.VttabletProcess.QueryTablet("delete from t1", keyspaceName, true) - assert.NoError(t, err) + require.NoError(t, err) _, err = masterTablet.VttabletProcess.QueryTablet("insert into t1 (id, value) values (null, md5(rand()))", keyspaceName, true) - assert.NoError(t, err) + require.NoError(t, err) for i := 0; i < 10; i++ { _, err = masterTablet.VttabletProcess.QueryTablet("insert into t1 (id, value) select null, md5(rand()) from t1", keyspaceName, true) - assert.NoError(t, err) + require.NoError(t, err) } checkTableRows(t, "t1", 1024) } @@ -297,46 +298,46 @@ func TestDrop(t *testing.T) { func TestPurge(t *testing.T) { populateTable(t) query, tableName, 
err := schema.GenerateRenameStatement("t1", schema.PurgeTableGCState, time.Now().UTC().Add(10*time.Second)) - assert.NoError(t, err) + require.NoError(t, err) _, err = masterTablet.VttabletProcess.QueryTablet(query, keyspaceName, true) - assert.NoError(t, err) + require.NoError(t, err) { exists, _, err := tableExists("t1") - assert.NoError(t, err) - assert.False(t, exists) + require.NoError(t, err) + require.False(t, exists) } { exists, _, err := tableExists(tableName) - assert.NoError(t, err) - assert.True(t, exists) + require.NoError(t, err) + require.True(t, exists) } time.Sleep(5 * time.Second) { // Table was created with +10s timestamp, so it should still exist exists, _, err := tableExists(tableName) - assert.NoError(t, err) - assert.True(t, exists) + require.NoError(t, err) + require.True(t, exists) checkTableRows(t, tableName, 1024) } - time.Sleep(1 * time.Minute) // purgeReentraceInterval + time.Sleep(15 * time.Second) // purgeReentranceInterval { // We're now both beyond table's timestamp as well as a tableGC interval exists, _, err := tableExists(tableName) - assert.NoError(t, err) - assert.False(t, exists) + require.NoError(t, err) + require.False(t, exists) } { // Table should be renamed as _vt_EVAC_... 
exists, evacTableName, err := tableExists(`\_vt\_EVAC\_%`) - assert.NoError(t, err) - assert.True(t, exists) + require.NoError(t, err) + require.True(t, exists) checkTableRows(t, evacTableName, 0) err = dropTable(evacTableName) - assert.NoError(t, err) + require.NoError(t, err) } } diff --git a/go/test/endtoend/topotest/consul/main_test.go b/go/test/endtoend/topotest/consul/main_test.go index f805c7c00b0..563f646f55e 100644 --- a/go/test/endtoend/topotest/consul/main_test.go +++ b/go/test/endtoend/topotest/consul/main_test.go @@ -96,7 +96,7 @@ func TestMain(m *testing.M) { os.Exit(exitCode) } -func TestTopoDownServingQuery(t *testing.T) { +func TestTopoRestart(t *testing.T) { defer cluster.PanicHandler(t) ctx := context.Background() vtParams := mysql.ConnParams{ @@ -107,16 +107,40 @@ func TestTopoDownServingQuery(t *testing.T) { require.Nil(t, err) defer conn.Close() - defer exec(t, conn, `delete from t1`) - execMulti(t, conn, `insert into t1(c1, c2, c3, c4) values (300,100,300,'abc'); ;; insert into t1(c1, c2, c3, c4) values (301,101,301,'abcd');;`) assertMatches(t, conn, `select c1,c2,c3 from t1`, `[[INT64(300) INT64(100) INT64(300)] [INT64(301) INT64(101) INT64(301)]]`) - clusterInstance.TopoProcess.TearDown(clusterInstance.Cell, clusterInstance.OriginalVTDATAROOT, clusterInstance.CurrentVTDATAROOT, true, *clusterInstance.TopoFlavorString()) - time.Sleep(3 * time.Second) - assertMatches(t, conn, `select c1,c2,c3 from t1`, `[[INT64(300) INT64(100) INT64(300)] [INT64(301) INT64(101) INT64(301)]]`) + + defer execute(t, conn, `delete from t1`) + + ch := make(chan interface{}) + + go func() { + clusterInstance.TopoProcess.TearDown(clusterInstance.Cell, clusterInstance.OriginalVTDATAROOT, clusterInstance.CurrentVTDATAROOT, true, *clusterInstance.TopoFlavorString()) + + // Some sleep to server few queries when topo is down. 
+ time.Sleep(400 * time.Millisecond) + + clusterInstance.TopoProcess.Setup(*clusterInstance.TopoFlavorString(), clusterInstance) + + // topo is up now. + ch <- 1 + }() + + timeOut := time.After(15 * time.Second) + + for { + select { + case <-ch: + return + case <-timeOut: + require.Fail(t, "timed out - topo process did not come up") + case <-time.After(100 * time.Millisecond): + assertMatches(t, conn, `select c1,c2,c3 from t1`, `[[INT64(300) INT64(100) INT64(300)] [INT64(301) INT64(101) INT64(301)]]`) + } + } } -func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { +func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { t.Helper() qr, err := conn.ExecuteFetch(query, 1000, true) require.NoError(t, err) @@ -139,7 +163,7 @@ func execMulti(t *testing.T, conn *mysql.Conn, query string) []*sqltypes.Result func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { t.Helper() - qr := exec(t, conn, query) + qr := execute(t, conn, query) got := fmt.Sprintf("%v", qr.Rows) diff := cmp.Diff(expected, got) if diff != "" { diff --git a/go/test/endtoend/versionupgrade/upgrade_test.go b/go/test/endtoend/versionupgrade/upgrade_test.go index f341bf792e0..f95ce9ea4f4 100644 --- a/go/test/endtoend/versionupgrade/upgrade_test.go +++ b/go/test/endtoend/versionupgrade/upgrade_test.go @@ -150,8 +150,8 @@ func TestDeploySchema(t *testing.T) { { sqlQuery := fmt.Sprintf(createTable, tableName) - _, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, "") - require.Nil(t, err) + result, err := clusterInstance.VtctlclientProcess.ApplySchemaWithOutput(keyspaceName, sqlQuery, cluster.VtctlClientParams{DDLStrategy: ""}) + require.Nil(t, err, result) } for i := range clusterInstance.Keyspaces[0].Shards { sqlQuery := fmt.Sprintf(insertIntoTable, tableName) diff --git a/go/test/endtoend/vreplication/cluster.go b/go/test/endtoend/vreplication/cluster.go index 0d26fac77dd..5b96a6255f4 100644 --- 
a/go/test/endtoend/vreplication/cluster.go +++ b/go/test/endtoend/vreplication/cluster.go @@ -1,27 +1,24 @@ package vreplication import ( - "context" - "errors" "fmt" "math/rand" "os" "os/exec" "path" "strings" + "sync" "testing" "time" "github.com/stretchr/testify/require" - "vitess.io/vitess/go/mysql" - "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" "vitess.io/vitess/go/vt/log" ) var ( - debug = false // set to true to always use local env vtdataroot for local debugging + debug = false // set to true for local debugging: this uses the local env vtdataroot and does not teardown clusters originalVtdataroot string vtdataroot string @@ -44,6 +41,8 @@ type ClusterConfig struct { tabletPortBase int tabletGrpcPortBase int tabletMysqlPortBase int + + vreplicationCompressGTID bool } // VitessCluster represents all components within the test cluster @@ -129,6 +128,11 @@ func getClusterConfig(idx int, dataRootDir string) *ClusterConfig { } func init() { + // for local debugging set this variable so that each run uses VTDATAROOT instead of a random dir + // and also does not teardown the cluster for inspecting logs and the databases + if os.Getenv("VREPLICATION_E2E_DEBUG") != "" { + debug = true + } rand.Seed(time.Now().UTC().UnixNano()) originalVtdataroot = os.Getenv("VTDATAROOT") var mainVtDataRoot string @@ -211,7 +215,7 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, keyspace.VSchema = vschema for _, cell := range cells { if len(cell.Vtgates) == 0 { - fmt.Println("Starting vtgate") + log.Infof("Starting vtgate") vc.StartVtgate(t, cell, cellsToWatch) } } @@ -220,9 +224,20 @@ func (vc *VitessCluster) AddKeyspace(t *testing.T, cells []*Cell, ksName string, } // AddTablet creates new tablet with specified attributes -func (vc *VitessCluster) AddTablet(t *testing.T, cell *Cell, keyspace *Keyspace, shard *Shard, tabletType string, tabletID int) (*Tablet, *exec.Cmd, error) { +func (vc *VitessCluster) AddTablet(t 
testing.TB, cell *Cell, keyspace *Keyspace, shard *Shard, tabletType string, tabletID int) (*Tablet, *exec.Cmd, error) { tablet := &Tablet{} + options := []string{ + "-queryserver-config-schema-reload-time", "5", + "-enable-lag-throttler", + "-heartbeat_enable", + "-heartbeat_interval", "250ms", + } //FIXME: for multi-cell initial schema doesn't seem to load without "-queryserver-config-schema-reload-time" + + if mainClusterConfig.vreplicationCompressGTID { + options = append(options, "-vreplication_store_compressed_gtid=true") + } + vttablet := cluster.VttabletProcessInstance( vc.ClusterConfig.tabletPortBase+tabletID, vc.ClusterConfig.tabletGrpcPortBase+tabletID, @@ -235,12 +250,7 @@ func (vc *VitessCluster) AddTablet(t *testing.T, cell *Cell, keyspace *Keyspace, vc.Topo.Port, vc.ClusterConfig.hostname, vc.ClusterConfig.tmpDir, - []string{ - "-queryserver-config-schema-reload-time", "5", - "-enable-lag-throttler", - "-heartbeat_enable", - "-heartbeat_interval", "250ms", - }, //FIXME: for multi-cell initial schema doesn't seem to load without "-queryserver-config-schema-reload-time" + options, false) require.NotNil(t, vttablet) @@ -263,9 +273,9 @@ func (vc *VitessCluster) AddTablet(t *testing.T, cell *Cell, keyspace *Keyspace, } // AddShards creates shards given list of comma-separated keys with specified tablets in each shard -func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspace, names string, numReplicas int, numRdonly int, tabletIDBase int) error { +func (vc *VitessCluster) AddShards(t testing.TB, cells []*Cell, keyspace *Keyspace, names string, numReplicas int, numRdonly int, tabletIDBase int) error { arrNames := strings.Split(names, ",") - fmt.Printf("Addshards got %d shards with %+v\n", len(arrNames), arrNames) + log.Infof("Addshards got %d shards with %+v", len(arrNames), arrNames) isSharded := len(arrNames) > 1 masterTabletUID := 0 for ind, shardName := range arrNames { @@ -273,9 +283,9 @@ func (vc *VitessCluster) AddShards(t 
*testing.T, cells []*Cell, keyspace *Keyspa tabletIndex := 0 shard := &Shard{Name: shardName, IsSharded: isSharded, Tablets: make(map[string]*Tablet, 1)} if _, ok := keyspace.Shards[shardName]; ok { - fmt.Printf("Shard %s already exists, not adding\n", shardName) + log.Infof("Shard %s already exists, not adding", shardName) } else { - fmt.Printf("Adding Shard %s\n", shardName) + log.Infof("Adding Shard %s", shardName) if err := vc.VtctlClient.ExecuteCommand("CreateShard", keyspace.Name+"/"+shardName); err != nil { t.Fatalf("CreateShard command failed with %+v\n", err) } @@ -286,7 +296,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa tablets := make([]*Tablet, 0) if i == 0 { // only add master tablet for first cell, so first time CreateShard is called - fmt.Println("Adding Master tablet") + log.Infof("Adding Master tablet") master, proc, err := vc.AddTablet(t, cell, keyspace, shard, "replica", tabletID+tabletIndex) require.NoError(t, err) require.NotNil(t, master) @@ -298,7 +308,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } for i := 0; i < numReplicas; i++ { - fmt.Println("Adding Replica tablet") + log.Infof("Adding Replica tablet") tablet, proc, err := vc.AddTablet(t, cell, keyspace, shard, "replica", tabletID+tabletIndex) require.NoError(t, err) require.NotNil(t, tablet) @@ -307,7 +317,7 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa dbProcesses = append(dbProcesses, proc) } for i := 0; i < numRdonly; i++ { - fmt.Println("Adding RdOnly tablet") + log.Infof("Adding RdOnly tablet") tablet, proc, err := vc.AddTablet(t, cell, keyspace, shard, "rdonly", tabletID+tabletIndex) require.NoError(t, err) require.NotNil(t, tablet) @@ -317,40 +327,40 @@ func (vc *VitessCluster) AddShards(t *testing.T, cells []*Cell, keyspace *Keyspa } for ind, proc := range dbProcesses { - fmt.Printf("Waiting for mysql process for tablet %s\n", tablets[ind].Name) + 
log.Infof("Waiting for mysql process for tablet %s", tablets[ind].Name) if err := proc.Wait(); err != nil { t.Fatalf("%v :: Unable to start mysql server for %v", err, tablets[ind].Vttablet) } } for ind, tablet := range tablets { - fmt.Printf("Creating vt_keyspace database for tablet %s\n", tablets[ind].Name) + log.Infof("Creating vt_keyspace database for tablet %s", tablets[ind].Name) if _, err := tablet.Vttablet.QueryTablet(fmt.Sprintf("create database vt_%s", keyspace.Name), keyspace.Name, false); err != nil { t.Fatalf("Unable to start create database vt_%s for tablet %v", keyspace.Name, tablet.Vttablet) } - fmt.Printf("Running Setup() for vttablet %s\n", tablets[ind].Name) + log.Infof("Running Setup() for vttablet %s", tablets[ind].Name) if err := tablet.Vttablet.Setup(); err != nil { t.Fatalf(err.Error()) } } } require.NotEqual(t, 0, masterTabletUID, "Should have created a master tablet") - fmt.Printf("InitShardMaster for %d\n", masterTabletUID) + log.Infof("InitShardMaster for %d", masterTabletUID) require.NoError(t, vc.VtctlClient.InitShardMaster(keyspace.Name, shardName, cells[0].Name, masterTabletUID)) - fmt.Printf("Finished creating shard %s\n", shard.Name) + log.Infof("Finished creating shard %s", shard.Name) } return nil } // DeleteShard deletes a shard -func (vc *VitessCluster) DeleteShard(t *testing.T, cellName string, ksName string, shardName string) { +func (vc *VitessCluster) DeleteShard(t testing.TB, cellName string, ksName string, shardName string) { shard := vc.Cells[cellName].Keyspaces[ksName].Shards[shardName] require.NotNil(t, shard) for _, tab := range shard.Tablets { - fmt.Printf("Shutting down tablet %s\n", tab.Name) + log.Infof("Shutting down tablet %s", tab.Name) tab.Vttablet.TearDown() } - fmt.Printf("Deleting Shard %s\n", shardName) + log.Infof("Deleting Shard %s", shardName) //TODO how can we avoid the use of even_if_serving? 
if output, err := vc.VtctlClient.ExecuteCommandWithOutput("DeleteShard", "-recursive", "-even_if_serving", ksName+"/"+shardName); err != nil { t.Fatalf("DeleteShard command failed with error %+v and output %s\n", err, output) @@ -359,7 +369,7 @@ func (vc *VitessCluster) DeleteShard(t *testing.T, cellName string, ksName strin } // StartVtgate starts a vtgate process -func (vc *VitessCluster) StartVtgate(t *testing.T, cell *Cell, cellsToWatch string) { +func (vc *VitessCluster) StartVtgate(t testing.TB, cell *Cell, cellsToWatch string) { vtgate := cluster.VtgateProcessInstance( vc.ClusterConfig.vtgatePort, vc.ClusterConfig.vtgateGrpcPort, @@ -379,14 +389,13 @@ func (vc *VitessCluster) StartVtgate(t *testing.T, cell *Cell, cellsToWatch stri } // AddCell adds a new cell to the cluster -func (vc *VitessCluster) AddCell(t *testing.T, name string) (*Cell, error) { +func (vc *VitessCluster) AddCell(t testing.TB, name string) (*Cell, error) { cell := &Cell{Name: name, Keyspaces: make(map[string]*Keyspace), Vtgates: make([]*cluster.VtgateProcess, 0)} vc.Cells[name] = cell return cell, nil } -// TearDown brings down a cluster, deleting processes, removing topo keys -func (vc *VitessCluster) TearDown() { +func (vc *VitessCluster) teardown(t testing.TB) { for _, cell := range vc.Cells { for _, vtgate := range cell.Vtgates { if err := vtgate.TearDown(); err != nil { @@ -394,83 +403,64 @@ func (vc *VitessCluster) TearDown() { } } } + //collect unique keyspaces across cells + keyspaces := make(map[string]*Keyspace) for _, cell := range vc.Cells { for _, keyspace := range cell.Keyspaces { - for _, shard := range keyspace.Shards { - for _, tablet := range shard.Tablets { - if tablet.DbServer != nil && tablet.DbServer.TabletUID > 0 { - if _, err := tablet.DbServer.StopProcess(); err != nil { - log.Errorf("Error stopping mysql process: %s", err.Error()) + keyspaces[keyspace.Name] = keyspace + } + } + + var wg sync.WaitGroup + + for _, keyspace := range keyspaces { + for _, shard := 
range keyspace.Shards { + for _, tablet := range shard.Tablets { + wg.Add(1) + go func(tablet2 *Tablet) { + defer wg.Done() + if tablet2.DbServer != nil && tablet2.DbServer.TabletUID > 0 { + if _, err := tablet2.DbServer.StopProcess(); err != nil { + log.Infof("Error stopping mysql process: %s", err.Error()) } } - fmt.Printf("Stopping vttablet %s\n", tablet.Name) - if err := tablet.Vttablet.TearDown(); err != nil { - fmt.Printf("Stopped vttablet %s %s\n", tablet.Name, err.Error()) + if err := tablet2.Vttablet.TearDown(); err != nil { + log.Infof("Error stopping vttablet %s %s", tablet2.Name, err.Error()) + } else { + log.Infof("Successfully stopped vttablet %s", tablet2.Name) } - } + }(tablet) } } } - + wg.Wait() if err := vc.Vtctld.TearDown(); err != nil { - fmt.Printf("Error stopping Vtctld: %s\n", err.Error()) + log.Infof("Error stopping Vtctld: %s", err.Error()) } for _, cell := range vc.Cells { if err := vc.Topo.TearDown(cell.Name, originalVtdataroot, vtdataroot, false, "etcd2"); err != nil { - fmt.Printf("Error in etcd teardown - %s\n", err.Error()) + log.Infof("Error in etcd teardown - %s", err.Error()) } } } -// WaitForVReplicationToCatchup waits for "workflow" to finish copying -func (vc *VitessCluster) WaitForVReplicationToCatchup(vttablet *cluster.VttabletProcess, workflow string, database string, duration time.Duration) error { - queries := [3]string{ - fmt.Sprintf(`select count(*) from _vt.vreplication where workflow = "%s" and db_name = "%s" and pos = ''`, workflow, database), - "select count(*) from information_schema.tables where table_schema='_vt' and table_name='copy_state' limit 1;", - fmt.Sprintf(`select count(*) from _vt.copy_state where vrepl_id in (select id from _vt.vreplication where workflow = "%s" and db_name = "%s" )`, workflow, database), - } - results := [3]string{"[INT64(0)]", "[INT64(1)]", "[INT64(0)]"} - var lastChecked time.Time - for ind, query := range queries { - waitDuration := 500 * time.Millisecond - for duration > 0 { - 
fmt.Printf("Executing query %s on %s\n", query, vttablet.Name) - lastChecked = time.Now() - qr, err := vc.execTabletQuery(vttablet, query) - if err != nil { - return err - } - if qr != nil && qr.Rows != nil && len(qr.Rows) > 0 && fmt.Sprintf("%v", qr.Rows[0]) == string(results[ind]) { - break - } else { - fmt.Printf("In WaitForVReplicationToCatchup: %s %+v\n", query, qr.Rows) - } - time.Sleep(waitDuration) - duration -= waitDuration - } - if duration <= 0 { - fmt.Printf("WaitForVReplicationToCatchup timed out for workflow %s, keyspace %s\n", workflow, database) - return errors.New("WaitForVReplicationToCatchup timed out") - } - } - fmt.Printf("WaitForVReplicationToCatchup succeeded at %v\n", lastChecked) - return nil -} - -func (vc *VitessCluster) execTabletQuery(vttablet *cluster.VttabletProcess, query string) (*sqltypes.Result, error) { - vtParams := mysql.ConnParams{ - UnixSocket: fmt.Sprintf("%s/mysql.sock", vttablet.Directory), - Uname: "vt_dba", +// TearDown brings down a cluster, deleting processes, removing topo keys +func (vc *VitessCluster) TearDown(t testing.TB) { + if debug { + return } - ctx := context.Background() - var conn *mysql.Conn - conn, err := mysql.Connect(ctx, &vtParams) - if err != nil { - return nil, err + done := make(chan bool) + go func() { + vc.teardown(t) + done <- true + }() + select { + case <-done: + log.Infof("TearDown() was successful") + case <-time.After(1 * time.Minute): + log.Infof("TearDown() timed out") } - qr, err := conn.ExecuteFetch(query, 1000, true) - return qr, err } func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName string, tabletType string) map[string]*cluster.VttabletProcess { @@ -479,7 +469,7 @@ func (vc *VitessCluster) getVttabletsInKeyspace(t *testing.T, cell *Cell, ksName for _, shard := range keyspace.Shards { for _, tablet := range shard.Tablets { if tablet.Vttablet.GetTabletStatus() == "SERVING" && strings.EqualFold(tablet.Vttablet.VreplicationTabletType, tabletType) { - 
fmt.Printf("Serving status of tablet %s is %s, %s\n", tablet.Name, tablet.Vttablet.ServingStatus, tablet.Vttablet.GetTabletStatus()) + log.Infof("Serving status of tablet %s is %s, %s", tablet.Name, tablet.Vttablet.ServingStatus, tablet.Vttablet.GetTabletStatus()) tablets[tablet.Name] = tablet.Vttablet } } diff --git a/go/test/endtoend/vreplication/config.go b/go/test/endtoend/vreplication/config.go index 214876a2050..74e67622e28 100644 --- a/go/test/endtoend/vreplication/config.go +++ b/go/test/endtoend/vreplication/config.go @@ -3,10 +3,10 @@ package vreplication var ( initialProductSchema = ` create table product(pid int, description varbinary(128), primary key(pid)); -create table customer(cid int, name varbinary(128), meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)) CHARSET=utf8mb4; +create table customer(cid int, name varbinary(128), meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, bits bit(2) default b'11', primary key(cid)) CHARSET=utf8mb4; create table customer_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; create table merchant(mname varchar(128), category varchar(128), primary key(mname)) DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_general_ci; -create table orders(oid int, cid int, pid int, mname varchar(128), price int, primary key(oid)); +create table orders(oid int, cid int, pid int, mname varchar(128), price int, qty int, total int as (qty * price), total2 int as (qty * price) stored, primary key(oid)); create table order_seq(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; create table customer2(cid int, name varbinary(128), typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default 
current_timestamp, primary key(cid)); create table customer_seq2(id int, next_id bigint, cache bigint, primary key(id)) comment 'vitess_sequence'; @@ -42,7 +42,7 @@ create table tenant(tenant_id binary(16), name varbinary(16), primary key (tenan "reverse_bits": { "type": "reverse_bits" }, - "binary_md5": { + "bmd5": { "type": "binary_md5" } }, @@ -75,7 +75,7 @@ create table tenant(tenant_id binary(16), name varbinary(16), primary key (tenan "column_vindexes": [ { "column": "tenant_id", - "name": "binary_md5" + "name": "bmd5" } ] } @@ -243,8 +243,8 @@ create table tenant(tenant_id binary(16), name varbinary(16), primary key (tenan "targetKeyspace": "merchant", "tableSettings": [{ "targetTable": "morders", - "sourceExpression": "select * from orders", - "create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, primary key(oid))" + "sourceExpression": "select oid, cid, mname, pid, price, qty, total from orders", + "create_ddl": "create table morders(oid int, cid int, mname varchar(128), pid int, price int, qty int, total int, total2 int as (10 * total), primary key(oid))" }] } ` diff --git a/go/test/endtoend/vreplication/helper.go b/go/test/endtoend/vreplication/helper.go index 15726e2dd36..3a5a8b68762 100644 --- a/go/test/endtoend/vreplication/helper.go +++ b/go/test/endtoend/vreplication/helper.go @@ -167,10 +167,13 @@ func getQueryCount(url string, query string) int { func validateDryRunResults(t *testing.T, output string, want []string) { t.Helper() require.NotEmpty(t, output) - gotDryRun := strings.Split(output, "\n") require.True(t, len(gotDryRun) > 3) - gotDryRun = gotDryRun[3 : len(gotDryRun)-1] + startRow := 3 + if strings.Contains(gotDryRun[0], "deprecated") { + startRow = 4 + } + gotDryRun = gotDryRun[startRow : len(gotDryRun)-1] if len(want) != len(gotDryRun) { t.Fatalf("want and got: lengths don't match, \nwant\n%s\n\ngot\n%s", strings.Join(want, "\n"), strings.Join(gotDryRun, "\n")) } @@ -189,11 +192,11 @@ func 
validateDryRunResults(t *testing.T, output string, want []string) { } if !match { fail = true - t.Logf("want %s, got %s\n", w, gotDryRun[i]) + t.Fatalf("want %s, got %s\n", w, gotDryRun[i]) } } if fail { - t.Fatal("Dry run results don't match") + t.Fatalf("Dry run results don't match, want %s, got %s", want, gotDryRun) } } diff --git a/go/test/endtoend/vreplication/materialize_test.go b/go/test/endtoend/vreplication/materialize_test.go new file mode 100644 index 00000000000..0f576d94051 --- /dev/null +++ b/go/test/endtoend/vreplication/materialize_test.go @@ -0,0 +1,98 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vreplication + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/require" +) + +const smSchema = ` + CREATE TABLE tx ( + id bigint NOT NULL, + val varbinary(10) NOT NULL, + ts timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP, + typ tinyint NOT NULL, + PRIMARY KEY (id), + KEY ts (ts), + KEY typ (typ) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; +` + +const smVSchema = ` +{ + "sharded": true, + "tables": { + "tx": { + "column_vindexes": [ + { + "column": "id", + "name": "hash" + } + ] + } + }, + "vindexes": { + "hash": { + "type": "hash" + } + } +} +` + +const smMaterializeSpec = `{"workflow": "wf1", "source_keyspace": "ks1", "target_keyspace": "ks2", "table_settings": [ {"target_table": "tx", "source_expression": "select * from tx where typ>=2 and val > 'abc'" }] }` + +const initDataQuery = `insert into ks1.tx(id, typ, val) values (1, 1, 'abc'), (2, 1, 'def'), (3, 2, 'def'), (4, 2, 'abc'), (5, 3, 'def'), (6, 3, 'abc')` + +// TestShardedMaterialize tests a materialize from a sharded (single shard) using comparison filters +func TestShardedMaterialize(t *testing.T) { + defaultCellName := "zone1" + allCells := []string{"zone1"} + allCellNames = "zone1" + vc = NewVitessCluster(t, "TestShardedMaterialize", allCells, mainClusterConfig) + ks1 := "ks1" + ks2 := "ks2" + require.NotNil(t, vc) + defaultReplicas = 0 // because of CI resource constraints we can only run this test with master tablets + defer func() { defaultReplicas = 1 }() + + defer vc.TearDown(t) + + defaultCell = vc.Cells[defaultCellName] + vc.AddKeyspace(t, []*Cell{defaultCell}, ks1, "-", smVSchema, smSchema, defaultReplicas, defaultRdonly, 100) + vtgate = defaultCell.Vtgates[0] + require.NotNil(t, vtgate) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", ks1, "0"), 1) + + vc.AddKeyspace(t, []*Cell{defaultCell}, ks2, "-", smVSchema, smSchema, defaultReplicas, defaultRdonly, 200) + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", 
ks2, "0"), 1) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + verifyClusterHealth(t, vc) + _, err := vtgateConn.ExecuteFetch(initDataQuery, 0, false) + require.NoError(t, err) + materialize(t, smMaterializeSpec) + tab := vc.Cells[defaultCell.Name].Keyspaces[ks2].Shards["-"].Tablets["zone1-200"].Vttablet + catchup(t, tab, "wf1", "Materialize") + + validateCount(t, vtgateConn, ks2, "tx", 2) + validateQuery(t, vtgateConn, "ks2:-", "select id, val from tx", + `[[INT64(3) VARBINARY("def")] [INT64(5) VARBINARY("def")]]`) +} diff --git a/go/test/endtoend/vreplication/migrate_test.go b/go/test/endtoend/vreplication/migrate_test.go index d327f39788f..e9b26ca64f9 100644 --- a/go/test/endtoend/vreplication/migrate_test.go +++ b/go/test/endtoend/vreplication/migrate_test.go @@ -52,7 +52,7 @@ func TestMigrate(t *testing.T) { require.NotNil(t, vc) defaultReplicas = 0 defaultRdonly = 0 - defer vc.TearDown() + defer vc.TearDown(t) defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100) @@ -70,7 +70,7 @@ func TestMigrate(t *testing.T) { extCells := []string{extCell} extVc := NewVitessCluster(t, "TestMigrateExternal", extCells, externalClusterConfig) require.NotNil(t, extVc) - defer extVc.TearDown() + defer extVc.TearDown(t) extCell2 := extVc.Cells[extCell] extVc.AddKeyspace(t, []*Cell{extCell2}, "rating", "0", initialExternalVSchema, initialExternalSchema, 0, 0, 1000) diff --git a/go/test/endtoend/vreplication/performance_test.go b/go/test/endtoend/vreplication/performance_test.go new file mode 100644 index 00000000000..435690d93b1 --- /dev/null +++ b/go/test/endtoend/vreplication/performance_test.go @@ -0,0 +1,119 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vreplication + +import ( + "fmt" + "io" + "testing" + "time" + + "vitess.io/vitess/go/test/endtoend/cluster" + + "github.com/stretchr/testify/require" +) + +func TestReplicationStress(t *testing.T) { + if !*cluster.PerfTest { + t.Skip("performance tests disabled") + } + + const initialStressVSchema = ` +{ + "tables": { + "largebin": {}, + "customer": {} + } +} +` + const initialStressSchema = ` +create table largebin(pid int, maindata varbinary(4096), primary key(pid)); +create table customer(cid int, name varbinary(128), meta json default null, typ enum('individual','soho','enterprise'), sport set('football','cricket','baseball'),ts timestamp not null default current_timestamp, primary key(cid)) CHARSET=utf8mb4; +` + + const defaultCellName = "zone1" + + const sourceKs = "stress_src" + const targetKs = "stress_tgt" + + allCells := []string{defaultCellName} + allCellNames = defaultCellName + + vc = NewVitessCluster(t, "TestReplicationStress", allCells, mainClusterConfig) + require.NotNil(t, vc) + + defer vc.TearDown(t) + + defaultCell = vc.Cells[defaultCellName] + vc.AddKeyspace(t, []*Cell{defaultCell}, sourceKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 100) + vtgate = defaultCell.Vtgates[0] + require.NotNil(t, vtgate) + + vtgate.WaitForStatusOfTabletInShard(fmt.Sprintf("%s.%s.master", "product", "0"), 1) + + vtgateConn = getConnection(t, vc.ClusterConfig.hostname, vc.ClusterConfig.vtgateMySQLPort) + defer vtgateConn.Close() + + verifyClusterHealth(t, vc) + + const insertCount = 16 * 1024 * 1024 + + tablet := 
defaultCell.Keyspaces[sourceKs].Shards["0"].Tablets["zone1-100"].Vttablet + tablet.BulkLoad(t, "stress_src", "largebin", func(w io.Writer) { + for i := 0; i < insertCount; i++ { + fmt.Fprintf(w, "\"%d\",%q\n", i, "foobar") + } + }) + + validateCount(t, vtgateConn, "stress_src:0", "largebin", insertCount) + + t.Logf("creating new keysepace '%s'", targetKs) + vc.AddKeyspace(t, []*Cell{defaultCell}, targetKs, "0", initialStressVSchema, initialStressSchema, 0, 0, 200) + validateCount(t, vtgateConn, "stress_tgt:0", "largebin", 0) + + t.Logf("moving 'largebin' table...") + moveStart := time.Now() + + for _, ks := range defaultCell.Keyspaces { + for _, shard := range ks.Shards { + for _, tablet := range shard.Tablets { + tablet.Vttablet.ToggleProfiling() + } + } + } + + moveTables(t, defaultCell.Name, "stress_workflow", sourceKs, targetKs, "largebin") + + keyspaceTgt := defaultCell.Keyspaces[targetKs] + for _, shard := range keyspaceTgt.Shards { + for _, tablet := range shard.Tablets { + t.Logf("catchup shard=%v, tablet=%v", shard.Name, tablet.Name) + tablet.Vttablet.WaitForVReplicationToCatchup(t, "stress_workflow", fmt.Sprintf("vt_%s", tablet.Vttablet.Keyspace), 5*time.Minute) + } + } + + for _, ks := range defaultCell.Keyspaces { + for _, shard := range ks.Shards { + for _, tablet := range shard.Tablets { + tablet.Vttablet.ToggleProfiling() + } + } + } + + t.Logf("finished catching up after MoveTables (%v)", time.Since(moveStart)) + validateCount(t, vtgateConn, "stress_tgt:0", "largebin", insertCount) +} diff --git a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go index cb189b8867c..d69a0f5a56e 100644 --- a/go/test/endtoend/vreplication/resharding_workflows_v2_test.go +++ b/go/test/endtoend/vreplication/resharding_workflows_v2_test.go @@ -94,7 +94,7 @@ func tstWorkflowExec(t *testing.T, cells, workflow, sourceKs, targetKs, tables, } else { args = append(args, "Reshard") } - args = 
append(args, "-v2") + switch action { case workflowActionCreate: if currentWorkflowType == wrangler.MoveTablesWorkflow { @@ -233,7 +233,7 @@ func getCurrentState(t *testing.T) string { func TestBasicV2Workflows(t *testing.T) { vc = setupCluster(t) defer vtgateConn.Close() - defer vc.TearDown() + defer vc.TearDown(t) testMoveTablesV2Workflow(t) testReshardV2Workflow(t) @@ -438,7 +438,7 @@ func setupCustomerKeyspace(t *testing.T) { func TestSwitchReadsWritesInAnyOrder(t *testing.T) { vc = setupCluster(t) - defer vc.TearDown() + defer vc.TearDown(t) moveCustomerTableSwitchFlows(t, []*Cell{vc.Cells["zone1"]}, "zone1") } diff --git a/go/test/endtoend/vreplication/unsharded_init_data.sql b/go/test/endtoend/vreplication/unsharded_init_data.sql index 1b58404cfb7..4dd20072436 100644 --- a/go/test/endtoend/vreplication/unsharded_init_data.sql +++ b/go/test/endtoend/vreplication/unsharded_init_data.sql @@ -5,9 +5,9 @@ insert into merchant(mname, category) values('monoprice', 'electronics'); insert into merchant(mname, category) values('newegg', 'electronics'); insert into product(pid, description) values(1, 'keyboard'); insert into product(pid, description) values(2, 'monitor'); -insert into orders(oid, cid, mname, pid, price) values(1, 1, 'monoprice', 1, 10); -insert into orders(oid, cid, mname, pid, price) values(2, 1, 'newegg', 2, 15); -insert into orders(oid, cid, mname, pid, price) values(3, 2, 'monoprice', 2, 20); +insert into orders(oid, cid, mname, pid, price, qty) values(1, 1, 'monoprice', 1, 10, 1); +insert into orders(oid, cid, mname, pid, price, qty) values(2, 1, 'newegg', 2, 15, 2); +insert into orders(oid, cid, mname, pid, price, qty) values(3, 2, 'monoprice', 2, 20, 3); insert into customer2(cid, name, typ, sport) values(1, 'john',1,'football,baseball'); insert into customer2(cid, name, typ, sport) values(2, 'paul','soho','cricket'); insert into customer2(cid, name, typ, sport) values(3, 'ringo','enterprise',''); diff --git 
a/go/test/endtoend/vreplication/vreplication_test.go b/go/test/endtoend/vreplication/vreplication_test.go index 4c06d5ec864..20e13965a44 100644 --- a/go/test/endtoend/vreplication/vreplication_test.go +++ b/go/test/endtoend/vreplication/vreplication_test.go @@ -25,9 +25,10 @@ import ( "testing" "time" - "github.com/stretchr/testify/require" + "vitess.io/vitess/go/vt/log" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/test/endtoend/cluster" @@ -93,7 +94,7 @@ func TestBasicVreplicationWorkflow(t *testing.T) { defaultReplicas = 0 // because of CI resource constraints we can only run this test with master tablets defer func() { defaultReplicas = 1 }() - defer vc.TearDown() + defer vc.TearDown(t) defaultCell = vc.Cells[defaultCellName] vc.AddKeyspace(t, []*Cell{defaultCell}, "product", "0", initialProductVSchema, initialProductSchema, defaultReplicas, defaultRdonly, 100) @@ -142,7 +143,7 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { defaultCellName := "zone1" defaultCell = vc.Cells[defaultCellName] - defer vc.TearDown() + defer vc.TearDown(t) cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] @@ -162,14 +163,17 @@ func TestMultiCellVreplicationWorkflow(t *testing.T) { func TestCellAliasVreplicationWorkflow(t *testing.T) { cells := []string{"zone1", "zone2"} - + mainClusterConfig.vreplicationCompressGTID = true + defer func() { + mainClusterConfig.vreplicationCompressGTID = false + }() vc = NewVitessCluster(t, "TestBasicVreplicationWorkflow", cells, mainClusterConfig) require.NotNil(t, vc) allCellNames = "zone1,zone2" defaultCellName := "zone1" defaultCell = vc.Cells[defaultCellName] - defer vc.TearDown() + defer vc.TearDown(t) cell1 := vc.Cells["zone1"] cell2 := vc.Cells["zone2"] @@ -193,12 +197,12 @@ func TestCellAliasVreplicationWorkflow(t *testing.T) { func insertInitialData(t *testing.T) { t.Run("insertInitialData", func(t *testing.T) { - fmt.Printf("Inserting initial 
data\n") + log.Infof("Inserting initial data") lines, _ := ioutil.ReadFile("unsharded_init_data.sql") execMultipleQueries(t, vtgateConn, "product:0", string(lines)) execVtgateQuery(t, vtgateConn, "product:0", "insert into customer_seq(id, next_id, cache) values(0, 100, 100);") execVtgateQuery(t, vtgateConn, "product:0", "insert into order_seq(id, next_id, cache) values(0, 100, 100);") - fmt.Printf("Done inserting initial data\n") + log.Infof("Done inserting initial data") validateCount(t, vtgateConn, "product:0", "product", 2) validateCount(t, vtgateConn, "product:0", "customer", 3) @@ -296,6 +300,11 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl printShardPositions(vc, ksShards) switchWrites(t, reverseKsWorkflow, false) + output, err := vc.VtctlClient.ExecuteCommandWithOutput("Workflow", ksWorkflow, "show") + require.NoError(t, err) + require.Contains(t, output, "'customer.reverse_bits'") + require.Contains(t, output, "'customer.bmd5'") + insertQuery1 = "insert into customer(cid, name) values(1002, 'tempCustomer5')" require.True(t, validateThatQueryExecutesOnTablet(t, vtgateConn, productTab, "product", insertQuery1, matchInsertQuery1)) // both inserts go into 80-, this tests the edge-case where a stream (-80) has no relevant new events after the previous switch @@ -311,7 +320,7 @@ func shardCustomer(t *testing.T, testReverse bool, cells []*Cell, sourceCellOrAl dropSourcesDryRun(t, ksWorkflow, true, dryRunResultsDropSourcesRenameCustomerShard) var exists bool - exists, err := checkIfBlacklistExists(t, vc, "product:0", "customer") + exists, err = checkIfBlacklistExists(t, vc, "product:0", "customer") require.NoError(t, err, "Error getting blacklist for customer:0") require.True(t, exists) dropSources(t, ksWorkflow) @@ -468,17 +477,17 @@ func reshard(t *testing.T, ksName string, tableName string, workflow string, sou t.Fatal(err) } } - if err := vc.VtctlClient.ExecuteCommand("Reshard", "-cells="+sourceCellOrAlias, 
"-tablet_types=replica,master", ksWorkflow, sourceShards, targetShards); err != nil { + if err := vc.VtctlClient.ExecuteCommand("Reshard", "-v1", "-cells="+sourceCellOrAlias, "-tablet_types=replica,master", ksWorkflow, sourceShards, targetShards); err != nil { t.Fatalf("Reshard command failed with %+v\n", err) } tablets := vc.getVttabletsInKeyspace(t, defaultCell, ksName, "master") targetShards = "," + targetShards + "," for _, tab := range tablets { if strings.Contains(targetShards, ","+tab.Shard+",") { - fmt.Printf("Waiting for vrepl to catch up on %s since it IS a target shard\n", tab.Shard) + log.Infof("Waiting for vrepl to catch up on %s since it IS a target shard", tab.Shard) catchup(t, tab, workflow, "Reshard") } else { - fmt.Printf("Not waiting for vrepl to catch up on %s since it is NOT a target shard\n", tab.Shard) + log.Infof("Not waiting for vrepl to catch up on %s since it is NOT a target shard", tab.Shard) continue } } @@ -563,10 +572,10 @@ func shardMerchant(t *testing.T) { func vdiff(t *testing.T, workflow, cells string) { t.Run("vdiff", func(t *testing.T) { output, err := vc.VtctlClient.ExecuteCommandWithOutput("VDiff", "-tablet_types=master", "-source_cell="+cells, "-format", "json", workflow) - fmt.Printf("vdiff err: %+v, output: %+v\n", err, output) + log.Infof("vdiff err: %+v, output: %+v", err, output) require.Nil(t, err) require.NotNil(t, output) - diffReports := make([]*wrangler.DiffReport, 0) + diffReports := make(map[string]*wrangler.DiffReport) err = json.Unmarshal([]byte(output), &diffReports) require.Nil(t, err) if len(diffReports) < 1 { @@ -806,12 +815,11 @@ func verifyClusterHealth(t *testing.T, cluster *VitessCluster) { func catchup(t *testing.T, vttablet *cluster.VttabletProcess, workflow, info string) { const MaxWait = 10 * time.Second - err := vc.WaitForVReplicationToCatchup(vttablet, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), MaxWait) - require.NoError(t, err, fmt.Sprintf("%s timed out for workflow %s on tablet %s.%s.%s", 
info, workflow, vttablet.Keyspace, vttablet.Shard, vttablet.Name)) + vttablet.WaitForVReplicationToCatchup(t, workflow, fmt.Sprintf("vt_%s", vttablet.Keyspace), MaxWait) } func moveTables(t *testing.T, cell, workflow, sourceKs, targetKs, tables string) { - if err := vc.VtctlClient.ExecuteCommand("MoveTables", "-cells="+cell, "-workflow="+workflow, + if err := vc.VtctlClient.ExecuteCommand("MoveTables", "-v1", "-cells="+cell, "-workflow="+workflow, "-tablet_types="+"master,replica,rdonly", sourceKs, targetKs, tables); err != nil { t.Fatalf("MoveTables command failed with %+v\n", err) } @@ -846,7 +854,7 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { // Temporary code: print lots of info for debugging occasional flaky failures in customer reshard in CI for multicell test debug := true if debug { - fmt.Printf("------------------- START Extra debug info %s SwitchWrites %s\n", msg, ksWorkflow) + log.Infof("------------------- START Extra debug info %s SwitchWrites %s", msg, ksWorkflow) ksShards := []string{"product/0", "customer/-80", "customer/80-"} printShardPositions(vc, ksShards) custKs := vc.Cells[defaultCell.Name].Keyspaces["customer"] @@ -864,11 +872,11 @@ func printSwitchWritesExtraDebug(t *testing.T, ksWorkflow, msg string) { for _, query := range queries { qr, err := tab.QueryTablet(query, "", false) require.NoError(t, err) - fmt.Printf("\nTablet:%s.%s.%s.%d\nQuery: %s\n%+v\n\n", + log.Infof("\nTablet:%s.%s.%s.%d\nQuery: %s\n%+v\n", tab.Cell, tab.Keyspace, tab.Shard, tab.TabletUID, query, qr.Rows) } } - fmt.Printf("------------------- END Extra debug info %s SwitchWrites %s\n", msg, ksWorkflow) + log.Infof("------------------- END Extra debug info %s SwitchWrites %s", msg, ksWorkflow) } } diff --git a/go/test/endtoend/vreplication/vreplication_test_env.go b/go/test/endtoend/vreplication/vreplication_test_env.go index 4c53f5104b8..244025b83de 100644 --- a/go/test/endtoend/vreplication/vreplication_test_env.go +++ 
b/go/test/endtoend/vreplication/vreplication_test_env.go @@ -49,14 +49,14 @@ var dryRunResultsReadCustomerShard = []string{ var dryRunResultsSwitchWritesM2m3 = []string{ "Lock keyspace merchant", "Stop streams on keyspace merchant", - "/ Id 2 Keyspace customer Shard -80 Rules rules: at Position ", - "/ Id 2 Keyspace customer Shard -80 Rules rules: at Position ", - "/ Id 3 Keyspace customer Shard 80- Rules rules: at Position ", - "/ Id 3 Keyspace customer Shard 80- Rules rules: at Position ", - "/ Id 4 Keyspace customer Shard -80 Rules rules: at Position ", - "/ Id 4 Keyspace customer Shard -80 Rules rules: at Position ", - "/ Id 5 Keyspace customer Shard 80- Rules rules: at Position ", - "/ Id 5 Keyspace customer Shard 80- Rules rules: at Position ", + "/ Id 2 Keyspace customer Shard -80 Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '-80')\"} at Position ", + "/ Id 2 Keyspace customer Shard -80 Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '80-')\"} at Position ", + "/ Id 3 Keyspace customer Shard 80- Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '-80')\"} at Position ", + "/ Id 3 Keyspace customer Shard 80- Rules rules:{match:\"morders\" filter:\"select oid, cid, mname, pid, price, qty, total from orders where in_keyrange(mname, 'merchant.md5', '80-')\"} at Position ", + "/ Id 4 Keyspace customer Shard -80 Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where in_keyrange(mname, 'merchant.md5', '-80') group by merchant_name\"} at Position ", + "/ Id 4 Keyspace customer Shard -80 Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where 
in_keyrange(mname, 'merchant.md5', '80-') group by merchant_name\"} at Position ", + "/ Id 5 Keyspace customer Shard 80- Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where in_keyrange(mname, 'merchant.md5', '-80') group by merchant_name\"} at Position ", + "/ Id 5 Keyspace customer Shard 80- Rules rules:{match:\"msales\" filter:\"select mname as merchant_name, count(*) as kount, sum(price) as amount from orders where in_keyrange(mname, 'merchant.md5', '80-') group by merchant_name\"} at Position ", "Stop writes on keyspace merchant, tables [/.*]:", "/ Keyspace merchant, Shard -80 at Position", "/ Keyspace merchant, Shard 80- at Position", diff --git a/go/test/endtoend/vtctldweb/vtctld_web_main_test.go b/go/test/endtoend/vtctldweb/vtctld_web_main_test.go index 910c0a60838..d8a37050420 100644 --- a/go/test/endtoend/vtctldweb/vtctld_web_main_test.go +++ b/go/test/endtoend/vtctldweb/vtctld_web_main_test.go @@ -177,7 +177,7 @@ func CreateWebDriver(port int) error { return err } - name, err := wd.CurrentWindowHandle() //nolint + name, _ := wd.CurrentWindowHandle() //nolint return wd.ResizeWindow(name, 1280, 1024) } @@ -203,7 +203,7 @@ func CreateWebDriver(port int) error { if err != nil { return err } - name, err := wd.CurrentWindowHandle() //nolint + name, _ := wd.CurrentWindowHandle() //nolint return wd.ResizeWindow(name, 1280, 1024) } @@ -348,7 +348,7 @@ func getDashboardKeyspaces(t *testing.T) []string { dashboardContent, err := wd.FindElement(selenium.ByTagName, "vt-dashboard") require.Nil(t, err) - ksCards, err := dashboardContent.FindElements(selenium.ByClassName, "vt-keyspace-card") //nolint + ksCards, _ := dashboardContent.FindElements(selenium.ByClassName, "vt-keyspace-card") //nolint var out []string for _, ks := range ksCards { out = append(out, text(t, ks)) @@ -363,7 +363,7 @@ func getDashboardShards(t *testing.T) []string { dashboardContent, err := 
wd.FindElement(selenium.ByTagName, "vt-dashboard") //nolint require.Nil(t, err) - ksCards, err := dashboardContent.FindElements(selenium.ByClassName, "vt-shard-stats") //nolint + ksCards, _ := dashboardContent.FindElements(selenium.ByClassName, "vt-shard-stats") //nolint var out []string for _, ks := range ksCards { out = append(out, text(t, ks)) @@ -392,7 +392,7 @@ func getShardTablets(t *testing.T) ([]string, []string) { shardContent, err := wd.FindElement(selenium.ByTagName, "vt-shard-view") require.Nil(t, err) - tableRows, err := shardContent.FindElements(selenium.ByTagName, "tr") //nolint + tableRows, _ := shardContent.FindElements(selenium.ByTagName, "tr") //nolint tableRows = tableRows[1:] var tabletTypes, tabletUIDs []string diff --git a/go/test/endtoend/vtgate/errors_as_warnings/main_test.go b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go new file mode 100644 index 00000000000..21438bbf7e0 --- /dev/null +++ b/go/test/endtoend/vtgate/errors_as_warnings/main_test.go @@ -0,0 +1,185 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vtgate + +import ( + "context" + "flag" + "fmt" + "os" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + KeyspaceName = "ks" + Cell = "test" + SchemaSQL = `create table t1( + id1 bigint, + id2 bigint, + primary key(id1) +) Engine=InnoDB;` + + VSchema = ` +{ + "sharded": true, + "vindexes": { + "hash": { + "type": "hash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id1", + "name": "hash" + } + ] + } + } +}` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(Cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: KeyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + } + if err := clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, false); err != nil { + return 1 + } + + // Start vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} + +func TestScatterErrsAsWarns(t *testing.T) { + oltp, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer oltp.Close() + + olap, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer olap.Close() + + checkedExec(t, oltp, `insert into t1(id1, id2) values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)`) + defer func() { + checkedExec(t, oltp, "use @master") + checkedExec(t, oltp, `delete from t1`) + }() + + query1 := `select /*vt+ 
SCATTER_ERRORS_AS_WARNINGS */ id1 from t1` + query2 := `select /*vt+ SCATTER_ERRORS_AS_WARNINGS */ id1 from t1 order by id1` + showQ := "show warnings" + + // stop the mysql on one tablet, query will fail at vttablet level + require.NoError(t, + clusterInstance.Keyspaces[0].Shards[0].Replica().MysqlctlProcess.Stop()) + + modes := []struct { + conn *mysql.Conn + m string + }{ + {m: "oltp", conn: oltp}, + {m: "olap", conn: olap}, + } + + for _, mode := range modes { + t.Run(mode.m, func(t *testing.T) { + // connection setup + checkedExec(t, mode.conn, "use @replica") + checkedExec(t, mode.conn, fmt.Sprintf("set workload = %s", mode.m)) + + assertMatches(t, mode.conn, query1, `[[INT64(4)]]`) + assertContainsOneOf(t, mode.conn, showQ, "no valid tablet", "no healthy tablet", "mysql.sock: connect: no such file or directory") + assertMatches(t, mode.conn, query2, `[[INT64(4)]]`) + assertContainsOneOf(t, mode.conn, showQ, "no valid tablet", "no healthy tablet", "mysql.sock: connect: no such file or directory") + + // invalid_field should throw error and not warning + _, err = mode.conn.ExecuteFetch("SELECT /*vt+ SCATTER_ERRORS_AS_WARNINGS */ invalid_field from t1;", 1, false) + require.Error(t, err) + serr := mysql.NewSQLErrorFromError(err).(*mysql.SQLError) + require.Equal(t, 1054, serr.Number()) + }) + } +} + +func checkedExec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err) + return qr +} + +func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := checkedExec(t, conn, query) + got := fmt.Sprintf("%v", qr.Rows) + diff := cmp.Diff(expected, got) + if diff != "" { + t.Errorf("Query: %s (-want +got):\n%s\n%s", query, diff, got) + } +} + +func assertContainsOneOf(t *testing.T, conn *mysql.Conn, query string, expected ...string) { + t.Helper() + qr := checkedExec(t, conn, query) + got := fmt.Sprintf("%v", qr.Rows) + for _, s := 
range expected { + if strings.Contains(got, s) { + return + } + } + + t.Errorf("%s\n did not match any of %v", got, expected) +} diff --git a/go/test/endtoend/vtgate/gen4/gen4_test.go b/go/test/endtoend/vtgate/gen4/gen4_test.go new file mode 100644 index 00000000000..aa42b8aa3a2 --- /dev/null +++ b/go/test/endtoend/vtgate/gen4/gen4_test.go @@ -0,0 +1,71 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package vtgate + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" +) + +func TestOrderBy(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // insert some data. + checkedExec(t, conn, `insert into t1(id, col) values (100, 123),(10, 12),(1, 13),(1000, 1234)`) + + // Gen4 only supported query. + assertMatches(t, conn, `select col from t1 order by id`, `[[INT64(13)] [INT64(12)] [INT64(123)] [INT64(1234)]]`) + + // Gen4 unsupported query. v3 supported. + assertMatches(t, conn, `select col from t1 order by 1`, `[[INT64(12)] [INT64(13)] [INT64(123)] [INT64(1234)]]`) + + // unsupported in v3 and Gen4. 
+ _, err = exec(t, conn, `select t1.* from t1 order by id`) + require.Error(t, err) +} + +func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := checkedExec(t, conn, query) + got := fmt.Sprintf("%v", qr.Rows) + diff := cmp.Diff(expected, got) + if diff != "" { + t.Errorf("Query: %s (-want +got):\n%s", query, diff) + } +} + +func checkedExec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := exec(t, conn, query) + require.NoError(t, err, "for query: "+query) + return qr +} + +func exec(t *testing.T, conn *mysql.Conn, query string) (*sqltypes.Result, error) { + t.Helper() + return conn.ExecuteFetch(query, 1000, true) +} diff --git a/go/test/endtoend/vtgate/gen4/main_test.go b/go/test/endtoend/vtgate/gen4/main_test.go new file mode 100644 index 00000000000..03a9bd25a98 --- /dev/null +++ b/go/test/endtoend/vtgate/gen4/main_test.go @@ -0,0 +1,99 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package vtgate + +import ( + "flag" + "os" + "testing" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + KeyspaceName = "ks" + Cell = "test" + SchemaSQL = `create table t1( + id bigint, + col bigint, + primary key(id) +) Engine=InnoDB; +` + + VSchema = ` +{ + "sharded": true, + "vindexes": { + "xxhash": { + "type": "xxhash" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "id", + "name": "xxhash" + } + ] + } + } +}` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(Cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: KeyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + } + err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, true) + if err != nil { + return 1 + } + + // Start vtgate + clusterInstance.VtGateExtraArgs = []string{"-planner_version", "Gen4Fallback"} // enable Gen4 planner. 
+ err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} diff --git a/go/test/endtoend/vtgate/main_test.go b/go/test/endtoend/vtgate/main_test.go index e3c4817a800..672a6dcfb3d 100644 --- a/go/test/endtoend/vtgate/main_test.go +++ b/go/test/endtoend/vtgate/main_test.go @@ -136,6 +136,12 @@ create table t7_fk( CONSTRAINT t7_fk_ibfk_1 foreign key (t7_uid) references t7_xxhash(uid) on delete set null on update cascade ) Engine=InnoDB; + +create table t8( + id8 bigint, + testId bigint, + primary key(id8) +) Engine=InnoDB; ` VSchema = ` @@ -370,6 +376,14 @@ create table t7_fk( "name": "unicode_loose_xxhash" } ] + }, + "t8": { + "column_vindexes": [ + { + "column": "id8", + "name": "hash" + } + ] } } }` @@ -403,6 +417,8 @@ func TestMain(m *testing.M) { SchemaSQL: SchemaSQL, VSchema: VSchema, } + clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal"} + clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal", "-queryserver-config-schema-change-signal-interval", "0.1"} err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, true) if err != nil { return 1 @@ -418,11 +434,13 @@ func TestMain(m *testing.M) { return 1 } + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "-enable_system_settings=true") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { return 1 } + vtParams = mysql.ConnParams{ Host: clusterInstance.Hostname, Port: clusterInstance.VtgateMySQLPort, diff --git a/go/test/endtoend/vtgate/misc_test.go b/go/test/endtoend/vtgate/misc_test.go index 042c5062205..4e813c0a715 100644 --- a/go/test/endtoend/vtgate/misc_test.go +++ b/go/test/endtoend/vtgate/misc_test.go @@ -21,17 +21,14 @@ import ( "fmt" "testing" - "vitess.io/vitess/go/test/utils" - "github.com/google/go-cmp/cmp" - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" + "vitess.io/vitess/go/test/utils" ) func TestSelectNull(t *testing.T) { @@ -595,6 +592,138 @@ func TestSubQueryOnTopOfSubQuery(t *testing.T) { assertMatches(t, conn, "select id1 from t1 where id1 not in (select id3 from t2) and id2 in (select id4 from t2) order by id1", `[[INT64(3)] [INT64(4)]]`) } +func TestShowVGtid(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + query := "show global vgtid_executed from ks" + qr := exec(t, conn, query) + require.Equal(t, 1, len(qr.Rows)) + require.Equal(t, 2, len(qr.Rows[0])) + + defer exec(t, conn, `delete from t1`) + exec(t, conn, `insert into t1(id1, id2) values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)`) + qr2 := exec(t, conn, query) + require.Equal(t, 1, len(qr2.Rows)) + require.Equal(t, 2, len(qr2.Rows[0])) + + require.Equal(t, qr.Rows[0][0], qr2.Rows[0][0], "keyspace should be same") + require.NotEqual(t, qr.Rows[0][1].ToString(), qr2.Rows[0][1].ToString(), "vgtid should have changed") +} + +func TestShowGtid(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + query := "show global gtid_executed from ks" + qr := exec(t, conn, query) + require.Equal(t, 2, len(qr.Rows)) + + res := make(map[string]string, 2) + for _, row := range qr.Rows { + require.Equal(t, KeyspaceName, row[0].ToString()) + res[row[2].ToString()] = row[1].ToString() + } + + defer exec(t, conn, `delete from t1`) + exec(t, conn, `insert into t1(id1, id2) values (1, 1), (2, 2), (3, 3), (4, 4), (5, 5)`) + qr2 := exec(t, conn, query) + require.Equal(t, 2, len(qr2.Rows)) + + for _, row := range qr2.Rows { + require.Equal(t, KeyspaceName, row[0].ToString()) + gtid, exists := res[row[2].ToString()] + require.True(t, exists, "gtid not 
cached for row: %v", row) + require.NotEqual(t, gtid, row[1].ToString()) + } +} + +func TestQueryAndSubQWithLimit(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + exec(t, conn, "insert into t1(id1, id2) values(0,0),(1,1),(2,2),(3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8),(9,9)") + result := exec(t, conn, `select id1, id2 from t1 where id1 >= ( select id1 from t1 order by id1 asc limit 1) limit 100`) + assert.Equal(t, 10, len(result.Rows)) +} + +func TestDeleteAlias(t *testing.T) { + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + exec(t, conn, "delete t1 from t1 where id1 = 1") + exec(t, conn, "delete t.* from t1 t where t.id1 = 1") +} + +func TestFunctionInDefault(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // set the sql mode ALLOW_INVALID_DATES + exec(t, conn, `SET sql_mode = 'ALLOW_INVALID_DATES'`) + + _, err = conn.ExecuteFetch(`create table function_default (x varchar(25) DEFAULT (TRIM(" check ")))`, 1000, true) + // this query fails because mysql57 does not support functions in default clause + require.Error(t, err) + + // verify that currenet_timestamp and it's aliases work as default values + exec(t, conn, `create table function_default ( +ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +dt DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +ts2 TIMESTAMP DEFAULT CURRENT_TIMESTAMP, +dt2 DATETIME DEFAULT CURRENT_TIMESTAMP, +ts3 TIMESTAMP DEFAULT 0, +dt3 DATETIME DEFAULT 0, +ts4 TIMESTAMP DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, +dt4 DATETIME DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, +ts5 TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +ts6 TIMESTAMP NULL ON UPDATE CURRENT_TIMESTAMP, +dt5 DATETIME ON UPDATE CURRENT_TIMESTAMP, +dt6 DATETIME NOT NULL ON UPDATE 
CURRENT_TIMESTAMP, +ts7 TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), +ts8 TIMESTAMP DEFAULT NOW(), +ts9 TIMESTAMP DEFAULT LOCALTIMESTAMP, +ts10 TIMESTAMP DEFAULT LOCALTIME, +ts11 TIMESTAMP DEFAULT LOCALTIMESTAMP(), +ts12 TIMESTAMP DEFAULT LOCALTIME() +)`) + exec(t, conn, "drop table function_default") + + _, err = conn.ExecuteFetch(`create table function_default (ts TIMESTAMP DEFAULT UTC_TIMESTAMP)`, 1000, true) + // this query fails because utc_timestamp is not supported in default clause + require.Error(t, err) + + exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) + exec(t, conn, "drop table function_default") +} + +func TestRenameFieldsOnOLAP(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + _ = exec(t, conn, "set workload = olap") + defer func() { + exec(t, conn, "set workload = oltp") + }() + + qr := exec(t, conn, "show tables") + require.Equal(t, 1, len(qr.Fields)) + assert.Equal(t, `Tables_in_ks`, fmt.Sprintf("%v", qr.Fields[0].Name)) + _ = exec(t, conn, "use mysql") + qr = exec(t, conn, "select @@workload") + assert.Equal(t, `[[VARBINARY("OLAP")]]`, fmt.Sprintf("%v", qr.Rows)) +} + func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { t.Helper() qr := exec(t, conn, query) @@ -604,6 +733,7 @@ func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { t.Errorf("Query: %s (-want +got):\n%s", query, diff) } } + func assertMatchesNoOrder(t *testing.T, conn *mysql.Conn, query, expected string) { t.Helper() qr := exec(t, conn, query) diff --git a/go/test/endtoend/vtgate/mysql80/main_test.go b/go/test/endtoend/vtgate/mysql80/main_test.go index 56661010449..e3dfa1319c5 100644 --- a/go/test/endtoend/vtgate/mysql80/main_test.go +++ b/go/test/endtoend/vtgate/mysql80/main_test.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. 
+Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -55,6 +55,7 @@ func TestMain(m *testing.M) { return 1 } + clusterInstance.VtGateExtraArgs = append(clusterInstance.VtGateExtraArgs, "-enable_system_settings=true") // Start vtgate err = clusterInstance.StartVtgate() if err != nil { diff --git a/go/test/endtoend/vtgate/mysql80/misc_test.go b/go/test/endtoend/vtgate/mysql80/misc_test.go index 72e4eff8c69..0f614535dc1 100644 --- a/go/test/endtoend/vtgate/mysql80/misc_test.go +++ b/go/test/endtoend/vtgate/mysql80/misc_test.go @@ -1,5 +1,5 @@ /* -Copyright 2019 The Vitess Authors. +Copyright 2021 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -35,9 +35,38 @@ func TestFunctionInDefault(t *testing.T) { require.NoError(t, err) defer conn.Close() + // set the sql mode ALLOW_INVALID_DATES + exec(t, conn, `SET sql_mode = 'ALLOW_INVALID_DATES'`) + exec(t, conn, `create table function_default (x varchar(25) DEFAULT (TRIM(" check ")))`) exec(t, conn, "drop table function_default") + exec(t, conn, `create table function_default ( +ts TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +dt DATETIME DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +ts2 TIMESTAMP DEFAULT CURRENT_TIMESTAMP, +dt2 DATETIME DEFAULT CURRENT_TIMESTAMP, +ts3 TIMESTAMP DEFAULT 0, +dt3 DATETIME DEFAULT 0, +ts4 TIMESTAMP DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, +dt4 DATETIME DEFAULT 0 ON UPDATE CURRENT_TIMESTAMP, +ts5 TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, +ts6 TIMESTAMP NULL ON UPDATE CURRENT_TIMESTAMP, +dt5 DATETIME ON UPDATE CURRENT_TIMESTAMP, +dt6 DATETIME NOT NULL ON UPDATE CURRENT_TIMESTAMP, +ts7 TIMESTAMP(6) DEFAULT CURRENT_TIMESTAMP(6) ON UPDATE CURRENT_TIMESTAMP(6), +ts8 TIMESTAMP DEFAULT NOW(), +ts9 TIMESTAMP DEFAULT LOCALTIMESTAMP, +ts10 TIMESTAMP DEFAULT 
LOCALTIME, +ts11 TIMESTAMP DEFAULT LOCALTIMESTAMP(), +ts12 TIMESTAMP DEFAULT LOCALTIME() +)`) + exec(t, conn, "drop table function_default") + + // this query works because utc_timestamp will get parenthesised before reaching MySQL. However, this syntax is not supported in MySQL 8.0 + exec(t, conn, `create table function_default (ts TIMESTAMP DEFAULT UTC_TIMESTAMP)`) + exec(t, conn, "drop table function_default") + exec(t, conn, `create table function_default (x varchar(25) DEFAULT "check")`) exec(t, conn, "drop table function_default") } diff --git a/go/test/endtoend/vtgate/prefixfanout/main_test.go b/go/test/endtoend/vtgate/prefixfanout/main_test.go new file mode 100644 index 00000000000..b9af63210fd --- /dev/null +++ b/go/test/endtoend/vtgate/prefixfanout/main_test.go @@ -0,0 +1,273 @@ +/* +Copyright 2021 The Vitess Authors. +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package prefixfanout + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + cell = "zone1" + hostname = "localhost" + + sKs = "cfc_testing" + sSchema = ` +CREATE TABLE t1 ( +c1 VARCHAR(20) NOT NULL, +c2 varchar(40) NOT NULL, +PRIMARY KEY (c1) +) ENGINE=Innodb; +` + sVSchema = ` +{ + "sharded": true, + "vindexes": { + "cfc": { + "type": "cfc" + } + }, + "tables": { + "t1": { + "column_vindexes": [ + { + "column": "c1", + "name": "cfc" + } + ], + "columns": [ + { + "name": "c2", + "type": "VARCHAR" + } + ] + } + } +}` + + sKsMD5 = `cfc_testing_md5` + sSchemaMD5 = ` +CREATE TABLE t2 ( +c1 VARCHAR(20) NOT NULL, +c2 varchar(40) NOT NULL, +PRIMARY KEY (c1) +) ENGINE=Innodb;` + + sVSchemaMD5 = ` +{ + "sharded": true, + "vindexes": { + "cfc_md5": { + "type": "cfc", + "params": { + "hash": "md5", + "offsets": "[2]" + } + } + }, + "tables": { + "t2": { + "column_vindexes": [ + { + "column": "c1", + "name": "cfc_md5" + } + ], + "columns": [ + { + "name": "c2", + "type": "VARCHAR" + } + ] + } + } +}` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // Start keyspace + sKeyspace := &cluster.Keyspace{ + Name: sKs, + SchemaSQL: sSchema, + VSchema: sVSchema, + } + // cfc_testing + if err := clusterInstance.StartKeyspace(*sKeyspace, []string{"-41", "41-4180", "4180-42", "42-"}, 0, false); err != nil { + return 1 + } + // cfc_testing_md5 + if err := clusterInstance.StartKeyspace( + cluster.Keyspace{ + Name: sKsMD5, + SchemaSQL: sSchemaMD5, + VSchema: sVSchemaMD5, + }, 
 []string{"-c2", "c2-c20a80", "c20a80-d0", "d0-"}, 0, false); err != nil { + return 1 + } + + // Start vtgate + if err := clusterInstance.StartVtgate(); err != nil { + return 1 + } + + return m.Run() + }() + os.Exit(exitCode) +} + +func TestCFCPrefixQueryNoHash(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + exec(t, conn, "delete from t1") + defer exec(t, conn, "delete from t1") + // prepare the sentinel rows, i.e. every shard stores a row that begins with letter A. + // hex ascii code of 'A' is 41. For a given primary key, e.g. 'AA' here, it should + // only legally belong to a single shard. We insert into all shards with different + // `c2` value so that we can test if a query fans out to all or not. Based on the + // following shard layout only "41-4180", "4180-42" should serve the rows starting with 'A'. + shards := []string{"-41", "41-4180", "4180-42", "42-"} + for i, s := range shards { + exec(t, conn, fmt.Sprintf("use `%s:%s`", sKs, s)) + exec(t, conn, fmt.Sprintf("insert into t1 values('AA', 'shard-%d')", i)) + } + exec(t, conn, "use cfc_testing") + qr := exec(t, conn, "select c2 from t1 where c1 like 'A%' order by c2") + assert.Equal(t, 2, len(qr.Rows)) + // should only target a subset of shards serving rows starting with 'A'. + assert.EqualValues(t, `[[VARCHAR("shard-1")] [VARCHAR("shard-2")]]`, fmt.Sprintf("%v", qr.Rows)) + // should only target a subset of shards serving rows starting with 'AA', + // the shards to which 'AA' maps.
+ qr = exec(t, conn, "select c2 from t1 where c1 like 'AA'") + assert.Equal(t, 1, len(qr.Rows)) + assert.EqualValues(t, `[[VARCHAR("shard-1")]]`, fmt.Sprintf("%v", qr.Rows)) + // fan out to all when there is no prefix + qr = exec(t, conn, "select c2 from t1 where c1 like '%A' order by c2") + assert.Equal(t, 4, len(qr.Rows)) + fmt.Printf("%v", qr.Rows) + for i, r := range qr.Rows { + assert.Equal(t, fmt.Sprintf("shard-%d", i), r[0].ToString()) + } +} + +func TestCFCPrefixQueryWithHash(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + exec(t, conn, "delete from t2") + defer exec(t, conn, "delete from t2") + + shards := []string{"-c2", "c2-c20a80", "c20a80-d0", "d0-"} + // same idea of sentinel rows as above. Even though each row legally belongs to + // only one shard, we insert into all shards with different info to test our fan out. 
+ for i, s := range shards { + exec(t, conn, fmt.Sprintf("use `%s:%s`", sKsMD5, s)) + exec(t, conn, fmt.Sprintf("insert into t2 values('12AX', 'shard-%d')", i)) + exec(t, conn, fmt.Sprintf("insert into t2 values('12BX', 'shard-%d')", i)) + exec(t, conn, fmt.Sprintf("insert into t2 values('27CX', 'shard-%d')", i)) + } + + exec(t, conn, fmt.Sprintf("use `%s`", sKsMD5)) + // The prefix is ('12', 'A') + // md5('12') -> c20ad4d76fe97759aa27a0c99bff6710 + // md5('A') -> 7fc56270e7a70fa81a5935b72eacbe29 + // so keyspace id is c20a7f, which means shards "c2-c20a80" + qr := exec(t, conn, "select c2 from t2 where c1 like '12A%' order by c2") + assert.Equal(t, 1, len(qr.Rows)) + assert.Equal(t, `[[VARCHAR("shard-1")]]`, fmt.Sprintf("%v", qr.Rows)) + // The prefix is ('12') + // md5('12') -> c20ad4d76fe97759aa27a0c99bff6710 so the corresponding + // so keyspace id is c20a, which means shards "c2-c20a80", "c20a80-d0" + qr = exec(t, conn, "select c2 from t2 where c1 like '12%' order by c2") + assert.Equal(t, 4, len(qr.Rows)) + assert.Equal(t, `[[VARCHAR("shard-1")] [VARCHAR("shard-1")] [VARCHAR("shard-2")] [VARCHAR("shard-2")]]`, fmt.Sprintf("%v", qr.Rows)) + // in vschema the prefix length is defined as 2 bytes however only 1 byte + // is provided here so the query fans out to all. 
+ qr = exec(t, conn, "select c2 from t2 where c1 like '2%' order by c2") + assert.Equal(t, 4, len(qr.Rows)) + assert.Equal(t, `[[VARCHAR("shard-0")] [VARCHAR("shard-1")] [VARCHAR("shard-2")] [VARCHAR("shard-3")]]`, fmt.Sprintf("%v", qr.Rows)) +} + +func TestCFCInsert(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(ctx, &vtParams) + require.Nil(t, err) + defer conn.Close() + + exec(t, conn, "delete from t1") + defer exec(t, conn, "delete from t1") + + exec(t, conn, "insert into t1 (c1, c2) values ('AAA', 'BBB')") + qr := exec(t, conn, "select c2 from t1 where c1 like 'A%'") + assert.Equal(t, 1, len(qr.Rows)) + shards := []string{"-41", "4180-42", "42-"} + for _, s := range shards { + exec(t, conn, fmt.Sprintf("use `cfc_testing:%s`", s)) + qr = exec(t, conn, "select * from t1") + assert.Equal(t, 0, len(qr.Rows)) + } + // 'AAA' belongs to 41-4180 + exec(t, conn, "use `cfc_testing:41-4180`") + qr = exec(t, conn, "select c2 from t1") + assert.Equal(t, 1, len(qr.Rows)) + assert.Equal(t, `[[VARCHAR("BBB")]]`, fmt.Sprintf("%v", qr.Rows)) +} + +func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err) + return qr +} diff --git a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go index 69531781085..f14aa287e50 100644 --- a/go/test/endtoend/vtgate/reservedconn/sysvar_test.go +++ b/go/test/endtoend/vtgate/reservedconn/sysvar_test.go @@ -393,5 +393,5 @@ func TestSysvarSocket(t *testing.T) { require.True(t, ok, "not a mysql error: %T", err) assert.Equal(t, mysql.ERIncorrectGlobalLocalVar, sqlErr.Number()) assert.Equal(t, mysql.SSUnknownSQLState, sqlErr.SQLState()) - assert.Equal(t, "Variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: 
set socket = '/any/path'", sqlErr.Error()) + assert.Equal(t, "variable 'socket' is a read only variable (errno 1238) (sqlstate HY000) during query: set socket = '/any/path'", sqlErr.Error()) } diff --git a/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go new file mode 100644 index 00000000000..56c91a0ead1 --- /dev/null +++ b/go/test/endtoend/vtgate/schematracker/loadkeyspace/schema_load_keyspace_test.go @@ -0,0 +1,154 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package loadkeyspace + +import ( + "io/ioutil" + "path" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + sqlSchema = ` + create table vt_user ( + id bigint, + name varchar(64), + primary key (id) + ) Engine=InnoDB; + + create table main ( + id bigint, + val varchar(128), + primary key(id) + ) Engine=InnoDB; + + create table test_table ( + id bigint, + val varchar(128), + primary key(id) + ) Engine=InnoDB; +` +) + +func TestBlockedLoadKeyspace(t *testing.T) { + defer cluster.PanicHandler(t) + var err error + + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + err = clusterInstance.StartTopo() + require.NoError(t, err) + + // Start keyspace without the -queryserver-config-schema-change-signal flag + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: sqlSchema, + } + err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false) + require.NoError(t, err) + + // Start vtgate with the schema_change_signal flag + clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal"} + err = clusterInstance.StartVtgate() + require.NoError(t, err) + + // wait for addKeyspaceToTracker to timeout + time.Sleep(10 * time.Second) + + // check warning logs + logDir := clusterInstance.VtgateProcess.LogDir + all, err := ioutil.ReadFile(path.Join(logDir, "vtgate-stderr.txt")) + require.NoError(t, err) + require.Contains(t, string(all), "Unable to get initial schema reload") +} + +func TestLoadKeyspaceWithNoTablet(t *testing.T) { + defer cluster.PanicHandler(t) + var err error + + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + err = clusterInstance.StartTopo() + require.NoError(t, err) + + // create keyspace + keyspace := &cluster.Keyspace{ + 
Name: keyspaceName, + SchemaSQL: sqlSchema, + } + clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal"} + err = clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false) + require.NoError(t, err) + + // teardown vttablets + for _, vttablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets { + err = vttablet.VttabletProcess.TearDown() + require.NoError(t, err) + } + + // Start vtgate with the schema_change_signal flag + clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal"} + err = clusterInstance.StartVtgate() + require.NoError(t, err) + + // check warning logs + logDir := clusterInstance.VtgateProcess.LogDir + all, err := ioutil.ReadFile(path.Join(logDir, "vtgate-stderr.txt")) + require.NoError(t, err) + require.Contains(t, string(all), "Unable to get initial schema reload") +} + +func TestNoInitialKeyspace(t *testing.T) { + defer cluster.PanicHandler(t) + var err error + + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + err = clusterInstance.StartTopo() + require.NoError(t, err) + + // Start vtgate with the schema_change_signal flag + clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal"} + err = clusterInstance.StartVtgate() + require.NoError(t, err) + + logDir := clusterInstance.VtgateProcess.LogDir + + // teardown vtgate to flush logs + err = clusterInstance.VtgateProcess.TearDown() + require.NoError(t, err) + + // check info logs + all, err := ioutil.ReadFile(path.Join(logDir, "vtgate.INFO")) + require.NoError(t, err) + require.Contains(t, string(all), "No keyspace to load") +} diff --git a/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go new file mode 100644 index 00000000000..bb780d2c997 --- /dev/null +++ b/go/test/endtoend/vtgate/schematracker/restarttablet/schema_restart_test.go @@ -0,0 +1,128 @@ +/* +Copyright 2021 
The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schematracker + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/stretchr/testify/assert" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + hostname = "localhost" + keyspaceName = "ks" + cell = "zone1" + sqlSchema = ` + create table vt_user ( + id bigint, + name varchar(64), + primary key (id) + ) Engine=InnoDB; + + create table main ( + id bigint, + val varchar(128), + primary key(id) + ) Engine=InnoDB; + + create table test_table ( + id bigint, + val varchar(128), + primary key(id) + ) Engine=InnoDB; +` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitcode := func() int { + clusterInstance = cluster.NewCluster(cell, hostname) + defer clusterInstance.Teardown() + + // Start topo server + if err := clusterInstance.StartTopo(); err != nil { + return 1 + } + + // List of users authorized to execute vschema ddl operations + clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal"} + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: sqlSchema, + } + if err := clusterInstance.StartUnshardedKeyspace(*keyspace, 1, false); err != nil { + return 1 + } + + // restart the tablet so that the schema.Engine gets a 
chance to start with existing schema + tablet := clusterInstance.Keyspaces[0].Shards[0].MasterTablet() + tablet.VttabletProcess.ExtraArgs = []string{"-queryserver-config-schema-change-signal"} + if err := tablet.RestartOnlyTablet(); err != nil { + return 1 + } + + // Start vtgate + if err := clusterInstance.StartVtgate(); err != nil { + clusterInstance.VtgateProcess = cluster.VtgateProcess{} + return 1 + } + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitcode) +} + +func TestVSchemaTrackerInit(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + qr := exec(t, conn, "SHOW VSCHEMA TABLES") + got := fmt.Sprintf("%v", qr.Rows) + want := `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("test_table")] [VARCHAR("vt_user")]]` + assert.Equal(t, want, got) +} + +func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + if err != nil { + t.Fatal(err) + } + return qr +} diff --git a/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go new file mode 100644 index 00000000000..08901571299 --- /dev/null +++ b/go/test/endtoend/vtgate/schematracker/sharded/st_sharded_test.go @@ -0,0 +1,299 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sharded + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + + "vitess.io/vitess/go/test/utils" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + KeyspaceName = "ks" + Cell = "test" + SchemaSQL = ` +create table t2( + id3 bigint, + id4 bigint, + primary key(id3) +) Engine=InnoDB; + +create table t2_id4_idx( + id bigint not null auto_increment, + id4 bigint, + id3 bigint, + primary key(id), + key idx_id4(id4) +) Engine=InnoDB; + +create table t8( + id8 bigint, + testId bigint, + primary key(id8) +) Engine=InnoDB; +` + + VSchema = ` +{ + "sharded": true, + "vindexes": { + "unicode_loose_xxhash" : { + "type": "unicode_loose_xxhash" + }, + "unicode_loose_md5" : { + "type": "unicode_loose_md5" + }, + "hash": { + "type": "hash" + }, + "xxhash": { + "type": "xxhash" + }, + "t2_id4_idx": { + "type": "lookup_hash", + "params": { + "table": "t2_id4_idx", + "from": "id4", + "to": "id3", + "autocommit": "true" + }, + "owner": "t2" + } + }, + "tables": { + "t2": { + "column_vindexes": [ + { + "column": "id3", + "name": "hash" + }, + { + "column": "id4", + "name": "t2_id4_idx" + } + ] + }, + "t2_id4_idx": { + "column_vindexes": [ + { + "column": "id4", + "name": "hash" + } + ] + }, + "t8": { + "column_vindexes": [ + { + "column": "id8", + "name": "hash" + } + ] + } + } +}` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(Cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start 
keyspace + keyspace := &cluster.Keyspace{ + Name: KeyspaceName, + SchemaSQL: SchemaSQL, + VSchema: VSchema, + } + clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal", "-vschema_ddl_authorized_users", "%"} + clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal", "-queryserver-config-schema-change-signal-interval", "0.1"} + err = clusterInstance.StartKeyspace(*keyspace, []string{"-80", "80-"}, 1, true) + if err != nil { + return 1 + } + + // Start vtgate + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} + +func TestAmbiguousColumnJoin(t *testing.T) { + defer cluster.PanicHandler(t) + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + // this query only works if we know which table the testId belongs to. The vschema does not contain + // this info, so we are testing that the schema tracker has added column info to the vschema + _, err = conn.ExecuteFetch(`select testId from t8 join t2`, 1000, true) + require.NoError(t, err) +} + +func TestInitAndUpdate(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + assertMatches(t, conn, "SHOW VSCHEMA TABLES", `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`) + + // Init + _ = exec(t, conn, "create table test_sc (id bigint primary key)") + assertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")]]`, + 100*time.Millisecond, + 3*time.Second, + "test_sc not in vschema tables") + + // Tables Update via health check. 
+ _ = exec(t, conn, "create table test_sc1 (id bigint primary key)") + assertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")] [VARCHAR("test_sc")] [VARCHAR("test_sc1")]]`, + 100*time.Millisecond, + 3*time.Second, + "test_sc1 not in vschema tables") + + _ = exec(t, conn, "drop table test_sc, test_sc1") + assertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("dual")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`, + 100*time.Millisecond, + 3*time.Second, + "test_sc and test_sc_1 should not be in vschema tables") + +} + +func TestDMLOnNewTable(t *testing.T) { + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // create a new table which is not part of the VSchema + exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`) + + // wait for vttablet's schema reload interval to pass + assertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("dual")] [VARCHAR("new_table_tracked")] [VARCHAR("t2")] [VARCHAR("t2_id4_idx")] [VARCHAR("t8")]]`, + 100*time.Millisecond, + 3*time.Second, + "test_sc not in vschema tables") + + assertMatches(t, conn, "select id from new_table_tracked", `[]`) // select + assertMatches(t, conn, "select id from new_table_tracked where id = 5", `[]`) // select + // DML on new table + // insert initial data ,update and delete will fail since we have not added a primary vindex + errorMessage := "table 'new_table_tracked' does not have a primary vindex (errno 1173) (sqlstate 42000)" + assertError(t, conn, `insert into new_table_tracked(id) values(0),(1)`, errorMessage) + assertError(t, conn, `update new_table_tracked set name = "newName1"`, errorMessage) + assertError(t, conn, "delete from new_table_tracked", errorMessage) + + exec(t, conn, `select name from new_table_tracked join t8`) + + // add a primary 
vindex for the table + exec(t, conn, "alter vschema on ks.new_table_tracked add vindex hash(id) using hash") + time.Sleep(1 * time.Second) + exec(t, conn, `insert into new_table_tracked(id) values(0),(1)`) + exec(t, conn, `insert into t8(id8) values(2)`) + defer exec(t, conn, `delete from t8`) + assertMatchesNoOrder(t, conn, `select id from new_table_tracked join t8`, `[[INT64(0)] [INT64(1)]]`) +} + +func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := exec(t, conn, query) + got := fmt.Sprintf("%v", qr.Rows) + diff := cmp.Diff(expected, got) + if diff != "" { + t.Errorf("Query: %s (-want +got):\n%s", query, diff) + } +} + +func assertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected string, r time.Duration, d time.Duration, failureMsg string) { + t.Helper() + timeout := time.After(d) + diff := "actual and expectation does not match" + for len(diff) > 0 { + select { + case <-timeout: + require.Fail(t, failureMsg, diff) + case <-time.After(r): + qr := exec(t, conn, query) + diff = cmp.Diff(expected, + fmt.Sprintf("%v", qr.Rows)) + } + + } +} + +func assertMatchesNoOrder(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := exec(t, conn, query) + actual := fmt.Sprintf("%v", qr.Rows) + assert.Equal(t, utils.SortString(expected), utils.SortString(actual), "for query: [%s] expected \n%s \nbut actual \n%s", query, expected, actual) +} + +func assertError(t *testing.T, conn *mysql.Conn, query, errorMessage string) { + t.Helper() + _, err := conn.ExecuteFetch(query, 1000, true) + require.Error(t, err) + assert.Contains(t, err.Error(), errorMessage) +} + +func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err, "for query: "+query) + return qr +} diff --git a/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go 
b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go new file mode 100644 index 00000000000..e4ced166703 --- /dev/null +++ b/go/test/endtoend/vtgate/schematracker/unsharded/st_unsharded_test.go @@ -0,0 +1,168 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package unsharded + +import ( + "context" + "flag" + "fmt" + "os" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/mysql" + "vitess.io/vitess/go/sqltypes" + "vitess.io/vitess/go/test/endtoend/cluster" +) + +var ( + clusterInstance *cluster.LocalProcessCluster + vtParams mysql.ConnParams + keyspaceName = "ks" + cell = "zone1" + sqlSchema = ` + create table main ( + id bigint, + val varchar(128), + primary key(id) + ) Engine=InnoDB; +` +) + +func TestMain(m *testing.M) { + defer cluster.PanicHandler(nil) + flag.Parse() + + exitCode := func() int { + clusterInstance = cluster.NewCluster(cell, "localhost") + defer clusterInstance.Teardown() + + // Start topo server + err := clusterInstance.StartTopo() + if err != nil { + return 1 + } + + // Start keyspace + keyspace := &cluster.Keyspace{ + Name: keyspaceName, + SchemaSQL: sqlSchema, + } + clusterInstance.VtTabletExtraArgs = []string{"-queryserver-config-schema-change-signal", "-queryserver-config-schema-change-signal-interval", "0.1"} + err = clusterInstance.StartUnshardedKeyspace(*keyspace, 0, false) + if err != nil { + return 1 + } + + // Start vtgate 
+ clusterInstance.VtGateExtraArgs = []string{"-schema_change_signal", "-vschema_ddl_authorized_users", "%"} + err = clusterInstance.StartVtgate() + if err != nil { + return 1 + } + + vtParams = mysql.ConnParams{ + Host: clusterInstance.Hostname, + Port: clusterInstance.VtgateMySQLPort, + } + return m.Run() + }() + os.Exit(exitCode) +} + +func TestNewUnshardedTable(t *testing.T) { + defer cluster.PanicHandler(t) + + // create a sql connection + ctx := context.Background() + conn, err := mysql.Connect(ctx, &vtParams) + require.NoError(t, err) + defer conn.Close() + + // ensuring our initial table "main" is in the schema + qr := exec(t, conn, "SHOW VSCHEMA TABLES") + got := fmt.Sprintf("%v", qr.Rows) + want := `[[VARCHAR("dual")] [VARCHAR("main")]]` + require.Equal(t, want, got) + + // create a new table which is not part of the VSchema + exec(t, conn, `create table new_table_tracked(id bigint, name varchar(100), primary key(id)) Engine=InnoDB`) + + // waiting for the vttablet's schema_reload interval to kick in + assertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("dual")] [VARCHAR("main")] [VARCHAR("new_table_tracked")]]`, + 100*time.Millisecond, + 3*time.Second, + "new_table_tracked not in vschema tables") + + assertMatches(t, conn, "select id from new_table_tracked", `[]`) // select + assertMatches(t, conn, "select id from new_table_tracked where id = 5", `[]`) // select + // DML on new table + // insert initial data ,update and delete for the new table + exec(t, conn, `insert into new_table_tracked(id) values(0),(1)`) + exec(t, conn, `update new_table_tracked set name = "newName1"`) + exec(t, conn, "delete from new_table_tracked where id = 0") + assertMatches(t, conn, `select * from new_table_tracked`, `[[INT64(1) VARCHAR("newName1")]]`) + + exec(t, conn, `drop table new_table_tracked`) + + // waiting for the vttablet's schema_reload interval to kick in + assertMatchesWithTimeout(t, conn, + "SHOW VSCHEMA TABLES", + `[[VARCHAR("dual")] 
[VARCHAR("main")]]`, + 100*time.Millisecond, + 3*time.Second, + "new_table_tracked not in vschema tables") +} + +func assertMatches(t *testing.T, conn *mysql.Conn, query, expected string) { + t.Helper() + qr := exec(t, conn, query) + got := fmt.Sprintf("%v", qr.Rows) + diff := cmp.Diff(expected, got) + if diff != "" { + t.Errorf("Query: %s (-want +got):\n%s", query, diff) + } +} + +func assertMatchesWithTimeout(t *testing.T, conn *mysql.Conn, query, expected string, r time.Duration, d time.Duration, failureMsg string) { + t.Helper() + timeout := time.After(d) + diff := "actual and expectation does not match" + for len(diff) > 0 { + select { + case <-timeout: + require.Fail(t, failureMsg, diff) + case <-time.After(r): + qr := exec(t, conn, query) + diff = cmp.Diff(expected, + fmt.Sprintf("%v", qr.Rows)) + } + + } +} + +func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { + t.Helper() + qr, err := conn.ExecuteFetch(query, 1000, true) + require.NoError(t, err, "for query: "+query) + return qr +} diff --git a/go/test/endtoend/vtgate/system_schema_test.go b/go/test/endtoend/vtgate/system_schema_test.go index ed1c99b91da..c5b0af8e445 100644 --- a/go/test/endtoend/vtgate/system_schema_test.go +++ b/go/test/endtoend/vtgate/system_schema_test.go @@ -78,6 +78,8 @@ func TestInformationSchemaQuery(t *testing.T) { assertResultIsEmpty(t, conn, "table_schema = 'PERFORMANCE_SCHEMA'") assertSingleRowIsReturned(t, conn, "table_schema = 'performance_schema' and table_name = 'users'", "performance_schema") assertResultIsEmpty(t, conn, "table_schema = 'performance_schema' and table_name = 'foo'") + assertSingleRowIsReturned(t, conn, "table_schema = 'vt_ks' and table_name = 't1'", "vt_ks") + assertSingleRowIsReturned(t, conn, "table_schema = 'ks' and table_name = 't1'", "vt_ks") } func assertResultIsEmpty(t *testing.T, conn *mysql.Conn, pre string) { diff --git a/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go 
b/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go index 04ee7760064..9b4640a8272 100644 --- a/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go +++ b/go/test/endtoend/vtgate/transaction/rollback/txn_rollback_shutdown_test.go @@ -160,5 +160,5 @@ func TestErrorInAutocommitSession(t *testing.T) { // if we have properly working autocommit code, both the successful inserts should be visible to a second // connection, even if we have not done an explicit commit - assert.Equal(t, `[[INT64(1) VARCHAR("foo")] [INT64(2) VARCHAR("baz")]]`, fmt.Sprintf("%v", result.Rows)) + assert.Equal(t, `[[INT64(1) VARCHAR("foo")] [INT64(2) VARCHAR("baz")] [INT64(3) VARCHAR("mark")] [INT64(4) VARCHAR("doug")]]`, fmt.Sprintf("%v", result.Rows)) } diff --git a/go/test/endtoend/vtgate/unsharded/main_test.go b/go/test/endtoend/vtgate/unsharded/main_test.go index 547b12ef2af..b5209ba3eb8 100644 --- a/go/test/endtoend/vtgate/unsharded/main_test.go +++ b/go/test/endtoend/vtgate/unsharded/main_test.go @@ -436,6 +436,19 @@ func TestNumericPrecisionScale(t *testing.T) { assert.True(t, qr.Rows[0][1].Type() == sqltypes.Uint64 || qr.Rows[0][1].Type() == sqltypes.Uint32) } +func TestDeleteAlias(t *testing.T) { + vtParams := mysql.ConnParams{ + Host: "localhost", + Port: clusterInstance.VtgateMySQLPort, + } + conn, err := mysql.Connect(context.Background(), &vtParams) + require.NoError(t, err) + defer conn.Close() + + exec(t, conn, "delete t1 from t1 where c1 = 1") + exec(t, conn, "delete t.* from t1 t where t.c1 = 1") +} + func exec(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { t.Helper() qr, err := conn.ExecuteFetch(query, 1000, true) diff --git a/go/test/endtoend/orchestrator/test_config.json b/go/test/endtoend/vtorc/test_config.json similarity index 100% rename from go/test/endtoend/orchestrator/test_config.json rename to go/test/endtoend/vtorc/test_config.json diff --git a/go/test/endtoend/orchestrator/orc_test.go 
b/go/test/endtoend/vtorc/vtorc_test.go similarity index 96% rename from go/test/endtoend/orchestrator/orc_test.go rename to go/test/endtoend/vtorc/vtorc_test.go index ca1f89394c3..f6e8690e8d2 100644 --- a/go/test/endtoend/orchestrator/orc_test.go +++ b/go/test/endtoend/vtorc/vtorc_test.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package orchestrator +package vtorc import ( "context" @@ -204,6 +204,24 @@ func TestDownMaster(t *testing.T) { } } +func waitForReadOnlyValue(t *testing.T, curMaster *cluster.Vttablet, expectValue int64) (match bool) { + timeout := 15 * time.Second + startTime := time.Now() + for time.Since(startTime) < timeout { + qr := runSQL(t, "select @@global.read_only as read_only", curMaster, "") + require.NotNil(t, qr) + row := qr.Named().Row() + require.NotNil(t, row) + readOnly, err := row.ToInt64("read_only") + require.NoError(t, err) + if readOnly == expectValue { + return true + } + time.Sleep(time.Second) + } + return false +} + // 3. make master readonly, let orc repair func TestMasterReadOnly(t *testing.T) { defer cluster.PanicHandler(t) @@ -224,12 +242,8 @@ func TestMasterReadOnly(t *testing.T) { runSQL(t, "set global read_only=ON", curMaster, "") // wait for repair - // TODO(deepthi): wait for condition instead of sleep - time.Sleep(15 * time.Second) - qr := runSQL(t, "select @@global.read_only", curMaster, "") - require.NotNil(t, qr) - require.Equal(t, 1, len(qr.Rows)) - require.Equal(t, "[[INT64(0)]]", fmt.Sprintf("%s", qr.Rows), qr.Rows) + match := waitForReadOnlyValue(t, curMaster, 0) + require.True(t, match) } // 4. 
make replica ReadWrite, let orc repair @@ -260,12 +274,8 @@ func TestReplicaReadWrite(t *testing.T) { runSQL(t, "set global read_only=OFF", replica, "") // wait for repair - // TODO(deepthi): wait for condition instead of sleep - time.Sleep(15 * time.Second) - qr := runSQL(t, "select @@global.read_only", replica, "") - require.NotNil(t, qr) - require.Equal(t, 1, len(qr.Rows)) - require.Equal(t, "[[INT64(1)]]", fmt.Sprintf("%s", qr.Rows), qr.Rows) + match := waitForReadOnlyValue(t, replica, 1) + require.True(t, match) } // 5. stop replication, let orc repair @@ -456,7 +466,7 @@ func checkMasterTablet(t *testing.T, cluster *cluster.LocalProcessCluster, table //if !streamHealthResponse.GetServing() { // log.Exitf("stream health not updated") //} - assert.True(t, streamHealthResponse.GetServing(), "stream health: %v", streamHealthResponse) + assert.True(t, streamHealthResponse.GetServing(), "stream health: %v", &streamHealthResponse) tabletType := streamHealthResponse.GetTarget().GetTabletType() require.Equal(t, topodatapb.TabletType_MASTER, tabletType) break diff --git a/go/test/fuzzing/fuzzdata/clusterfuzz-testcase-minimized-vtctl_fuzzer-6117897597485056 b/go/test/fuzzing/fuzzdata/clusterfuzz-testcase-minimized-vtctl_fuzzer-6117897597485056 new file mode 100644 index 00000000000..6f539f685c9 --- /dev/null +++ b/go/test/fuzzing/fuzzdata/clusterfuzz-testcase-minimized-vtctl_fuzzer-6117897597485056 @@ -0,0 +1 @@ + v \ No newline at end of file diff --git a/go/test/fuzzing/fuzzdata/clusterfuzz-testcase-vtctl_fuzzer-6117897597485056 b/go/test/fuzzing/fuzzdata/clusterfuzz-testcase-vtctl_fuzzer-6117897597485056 new file mode 100644 index 00000000000..3a03bbce046 Binary files /dev/null and b/go/test/fuzzing/fuzzdata/clusterfuzz-testcase-vtctl_fuzzer-6117897597485056 differ diff --git a/go/test/fuzzing/oss_fuzz_build.sh b/go/test/fuzzing/oss_fuzz_build.sh index 167f6769f8e..c8c7b74ea08 100644 --- a/go/test/fuzzing/oss_fuzz_build.sh +++ b/go/test/fuzzing/oss_fuzz_build.sh @@ 
-14,15 +14,23 @@ # See the License for the specific language governing permissions and # limitations under the License. -compile_go_fuzzer ./go/test/fuzzing Fuzz vtctl_fuzzer -compile_go_fuzzer ./go/test/fuzzing FuzzIsDML is_dml_fuzzer -compile_go_fuzzer ./go/test/fuzzing FuzzNormalizer normalizer_fuzzer -compile_go_fuzzer ./go/test/fuzzing FuzzParser parser_fuzzer +compile_go_fuzzer vitess.io/vitess/go/test/fuzzing Fuzz vtctl_fuzzer +compile_go_fuzzer vitess.io/vitess/go/test/fuzzing FuzzIsDML is_dml_fuzzer +compile_go_fuzzer vitess.io/vitess/go/test/fuzzing FuzzNormalizer normalizer_fuzzer +compile_go_fuzzer vitess.io/vitess/go/test/fuzzing FuzzParser parser_fuzzer -#cp ./go/test/fuzzing/mysql/mysql_fuzzer.go ./go/mysql/ -compile_go_fuzzer ./go/mysql FuzzWritePacket write_packet_fuzzer -compile_go_fuzzer ./go/mysql FuzzHandleNextCommand handle_next_command_fuzzer -compile_go_fuzzer ./go/mysql FuzzReadQueryResults read_query_results_fuzzer +compile_go_fuzzer vitess.io/vitess/go/mysql FuzzWritePacket write_packet_fuzzer +compile_go_fuzzer vitess.io/vitess/go/mysql FuzzHandleNextCommand handle_next_command_fuzzer +compile_go_fuzzer vitess.io/vitess/go/mysql FuzzReadQueryResults read_query_results_fuzzer +compile_go_fuzzer vitess.io/vitess/go/mysql FuzzTLSServer fuzz_tls +compile_go_fuzzer vitess.io/vitess/go/vt/vtgate/grpcvtgateconn Fuzz grpc_vtgate_fuzzer +compile_go_fuzzer vitess.io/vitess/go/vt/vtgate/planbuilder/abstract FuzzAnalyse planbuilder_fuzzer gofuzz + +mv ./go/vt/vtgate/engine/fake_vcursor_test.go \ + ./go/vt/vtgate/engine/fake_vcursor.go +mv ./go/vt/vtgate/engine/fuzz_flaky_test.go ./go/vt/vtgate/engine/engine_fuzz.go +compile_go_fuzzer vitess.io/vitess/go/vt/vtgate/engine FuzzEngine engine_fuzzer # Build dictionaries cp $SRC/vitess/go/test/fuzzing/vtctl_fuzzer.dict $OUT/ + diff --git a/go/test/fuzzing/parser_fuzzer.go b/go/test/fuzzing/parser_fuzzer.go index a1595fcfef5..135373bc4b2 100644 --- a/go/test/fuzzing/parser_fuzzer.go +++ 
b/go/test/fuzzing/parser_fuzzer.go @@ -32,9 +32,8 @@ func FuzzNormalizer(data []byte) int { if err != nil { return -1 } - prefix := "bv" bv := make(map[string]*querypb.BindVariable) - sqlparser.Normalize(stmt, reservedVars, bv, prefix) + sqlparser.Normalize(stmt, sqlparser.NewReservedVars("bv", reservedVars), bv) return 1 } diff --git a/go/test/fuzzing/vtctl_fuzzer_test.go b/go/test/fuzzing/vtctl_fuzzer_test.go new file mode 100644 index 00000000000..5eee83f7461 --- /dev/null +++ b/go/test/fuzzing/vtctl_fuzzer_test.go @@ -0,0 +1,47 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package fuzzing + +import ( + "io/ioutil" + "path" + "runtime/debug" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVtctlFuzzer(t *testing.T) { + directoryName := "fuzzdata" + files, err := ioutil.ReadDir(directoryName) + require.NoError(t, err) + for _, file := range files { + t.Run(file.Name(), func(t *testing.T) { + defer func() { + r := recover() + if r != nil { + t.Error(r) + t.Fatal(string(debug.Stack())) + } + }() + testcase, err := ioutil.ReadFile(path.Join(directoryName, file.Name())) + require.NoError(t, err) + res := Fuzz(testcase) + require.Equal(t, 1, res) + }) + } +} diff --git a/go/test/utils/diff.go b/go/test/utils/diff.go index 293d279e8c4..ebd4f129f22 100644 --- a/go/test/utils/diff.go +++ b/go/test/utils/diff.go @@ -17,8 +17,13 @@ limitations under the License. 
package utils import ( + "reflect" "testing" + "google.golang.org/protobuf/encoding/prototext" + + "google.golang.org/protobuf/proto" + "github.com/google/go-cmp/cmp" ) @@ -43,11 +48,16 @@ import ( // In Test*() function: // // mustMatch(t, want, got, "something doesn't match") -func MustMatchFn(allowUnexportedTypes []interface{}, ignoredFields []string, extraOpts ...cmp.Option) func(t *testing.T, want, got interface{}, errMsg ...string) { - diffOpts := append([]cmp.Option{ - cmp.AllowUnexported(allowUnexportedTypes...), +func MustMatchFn(ignoredFields ...string) func(t *testing.T, want, got interface{}, errMsg ...string) { + diffOpts := []cmp.Option{ + cmp.Comparer(func(a, b proto.Message) bool { + return proto.Equal(a, b) + }), + cmp.Exporter(func(reflect.Type) bool { + return true + }), cmpIgnoreFields(ignoredFields...), - }, extraOpts...) + } // Diffs want/got and fails with errMsg on any failure. return func(t *testing.T, want, got interface{}, errMsg ...string) { t.Helper() @@ -62,7 +72,7 @@ func MustMatchFn(allowUnexportedTypes []interface{}, ignoredFields []string, ext // Usage in Test*() function: // // testutils.MustMatch(t, want, got, "something doesn't match") -var MustMatch = MustMatchFn(nil, nil) +var MustMatch = MustMatchFn() // Skips fields of pathNames for cmp.Diff. // Similar to standard cmpopts.IgnoreFields, but allows unexported fields. @@ -81,3 +91,14 @@ func cmpIgnoreFields(pathNames ...string) cmp.Option { return false }, cmp.Ignore()) } + +func MustMatchPB(t *testing.T, expected string, pb proto.Message) { + t.Helper() + + expectedPb := pb.ProtoReflect().New().Interface() + if err := prototext.Unmarshal([]byte(expected), expectedPb); err != nil { + t.Fatal(err) + } + + MustMatch(t, expectedPb, pb) +} diff --git a/go/textutil/template.go b/go/textutil/template.go new file mode 100644 index 00000000000..8f9280634ad --- /dev/null +++ b/go/textutil/template.go @@ -0,0 +1,33 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package textutil + +import ( + "strings" + "text/template" +) + +// ExecuteTemplate executes the given text template with the given data, and +// returns the resulting string. +func ExecuteTemplate(tmpl *template.Template, data interface{}) (string, error) { + buf := &strings.Builder{} + if err := tmpl.Execute(buf, data); err != nil { + return "", err + } + + return buf.String(), nil +} diff --git a/go/tools/astfmtgen/main.go b/go/tools/astfmtgen/main.go index 839c8a52cc5..0997e816bf7 100644 --- a/go/tools/astfmtgen/main.go +++ b/go/tools/astfmtgen/main.go @@ -22,23 +22,21 @@ import ( "go/printer" "go/token" "go/types" + "log" "os" "path" "strconv" "strings" + "vitess.io/vitess/go/tools/common" + "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/go/packages" ) func main() { - err := load(os.Args[1]) - if err != nil { - panic(err) - } -} + packageName := os.Args[1] -func load(packageName string) error { config := &packages.Config{ Mode: packages.NeedName | packages.NeedFiles | @@ -49,19 +47,18 @@ func load(packageName string) error { packages.NeedTypesInfo, } pkgs, err := packages.Load(config, packageName) - if err != nil { - return fmt.Errorf("error loading package %s: %w", packageName, err) + if err != nil || common.PkgFailed(pkgs) { + log.Fatal("error loading packaged") } for _, pkg := range pkgs { if pkg.Name == "sqlparser" { rewriter := &Rewriter{pkg: pkg} err := rewriter.Rewrite() if err != nil { - return err + 
log.Fatal(err.Error()) } } } - return nil } type Rewriter struct { diff --git a/go/tools/asthelpergen/asthelpergen.go b/go/tools/asthelpergen/asthelpergen.go index c1d7072b7c5..e8786b5514c 100644 --- a/go/tools/asthelpergen/asthelpergen.go +++ b/go/tools/asthelpergen/asthelpergen.go @@ -25,6 +25,8 @@ import ( "path" "strings" + "vitess.io/vitess/go/tools/common" + "github.com/dave/jennifer/jen" "golang.org/x/tools/go/packages" ) @@ -201,7 +203,8 @@ func GenerateASTHelpers(packagePatterns []string, rootIface, exceptCloneType str Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesSizes | packages.NeedTypesInfo | packages.NeedDeps | packages.NeedImports | packages.NeedModule, }, packagePatterns...) - if err != nil { + if err != nil || common.PkgFailed(loaded) { + log.Fatal("error loading packaged") return nil, err } diff --git a/go/tools/asthelpergen/main/main.go b/go/tools/asthelpergen/main/main.go index 9225aa7615d..9994364a567 100644 --- a/go/tools/asthelpergen/main/main.go +++ b/go/tools/asthelpergen/main/main.go @@ -1,3 +1,19 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + package main import ( diff --git a/go/tools/common/common.go b/go/tools/common/common.go new file mode 100644 index 00000000000..118be03e670 --- /dev/null +++ b/go/tools/common/common.go @@ -0,0 +1,35 @@ +/* +Copyright 2021 The Vitess Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package common + +import ( + "log" + + "golang.org/x/tools/go/packages" +) + +// PkgFailed returns true if any of the packages contain errors +func PkgFailed(loaded []*packages.Package) bool { + failed := false + for _, pkg := range loaded { + for _, e := range pkg.Errors { + log.Println(e.Error()) + failed = true + } + } + return failed +} diff --git a/go/tools/release-notes/release_notes.go b/go/tools/release-notes/release_notes.go new file mode 100644 index 00000000000..f7025e3af8e --- /dev/null +++ b/go/tools/release-notes/release_notes.go @@ -0,0 +1,402 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "encoding/json" + "flag" + "fmt" + "log" + "os" + "os/exec" + "regexp" + "sort" + "strings" + "sync" + "text/template" +) + +type ( + label struct { + Name string `json:"name"` + } + + author struct { + Login string `json:"login"` + } + + prInfo struct { + Labels []label `json:"labels"` + Number int `json:"number"` + Title string `json:"title"` + Author author `json:"author"` + } + + prsByComponent = map[string][]prInfo + + prsByType = map[string]prsByComponent + + sortedPRComponent struct { + Name string + PrInfos []prInfo + } + + sortedPRType struct { + Name string + Components []sortedPRComponent + } +) + +const ( + markdownTemplate = ` +{{- range $type := . }} +## {{ $type.Name }} +{{- range $component := $type.Components }} +### {{ $component.Name }} +{{- range $prInfo := $component.PrInfos }} + * {{ $prInfo.Title }} #{{ $prInfo.Number }} +{{- end }} +{{- end }} +{{- end }} +` + + prefixType = "Type: " + prefixComponent = "Component: " + numberOfThreads = 10 + lengthOfSingleSHA = 40 +) + +func loadMergedPRs(from, to string) (prs []string, authors []string, commitCount int, err error) { + // load the git log with "author \t title \t parents" + out, err := execCmd("git", "log", `--pretty=format:%ae%x09%s%x09%P%x09%h`, fmt.Sprintf("%s..%s", from, to)) + + if err != nil { + return + } + + return parseGitLog(string(out)) +} + +func parseGitLog(s string) (prs []string, authorCommits []string, commitCount int, err error) { + rx := regexp.MustCompile(`(.+)\t(.+)\t(.+)\t(.+)`) + mergePR := regexp.MustCompile(`Merge pull request #(\d+)`) + squashPR := regexp.MustCompile(`\(#(\d+)\)`) + authMap := map[string]string{} // here we will store email <-> gh user mappings + lines := strings.Split(s, "\n") + for _, line := range lines { + lineInfo := rx.FindStringSubmatch(line) + if len(lineInfo) != 5 { + log.Fatalf("failed to parse the output from git log: %s", line) + } + authorEmail := lineInfo[1] + title := lineInfo[2] + parents := 
lineInfo[3] + sha := lineInfo[4] + merged := mergePR.FindStringSubmatch(title) + if len(merged) == 2 { + // this is a merged PR. remember the PR # + prs = append(prs, merged[1]) + continue + } + + if len(parents) <= lengthOfSingleSHA { + // we have a single parent, and the commit counts + commitCount++ + if _, exists := authMap[authorEmail]; !exists { + authMap[authorEmail] = sha + } + } + + squashed := squashPR.FindStringSubmatch(title) + if len(squashed) == 2 { + // this is a merged PR. remember the PR # + prs = append(prs, squashed[1]) + continue + } + } + + for _, author := range authMap { + authorCommits = append(authorCommits, author) + } + + sort.Strings(prs) + sort.Strings(authorCommits) // not really needed, but makes testing easier + + return +} + +func execCmd(name string, arg ...string) ([]byte, error) { + out, err := exec.Command(name, arg...).Output() + if err != nil { + execErr, ok := err.(*exec.ExitError) + if ok { + return nil, fmt.Errorf("%s:\nstderr: %s\nstdout: %s", err.Error(), execErr.Stderr, out) + } + if strings.Contains(err.Error(), " executable file not found in") { + return nil, fmt.Errorf("the command `gh` seems to be missing. 
Please install it from https://github.com/cli/cli") + } + return nil, err + } + return out, nil +} + +func loadPRInfo(pr string) (prInfo, error) { + out, err := execCmd("gh", "pr", "view", pr, "--json", "title,number,labels,author") + if err != nil { + return prInfo{}, err + } + var prInfo prInfo + err = json.Unmarshal(out, &prInfo) + return prInfo, err +} + +func loadAuthorInfo(sha string) (string, error) { + out, err := execCmd("gh", "api", "/repos/vitessio/vitess/commits/"+sha) + if err != nil { + return "", err + } + var prInfo prInfo + err = json.Unmarshal(out, &prInfo) + if err != nil { + return "", err + } + return prInfo.Author.Login, nil +} + +type req struct { + isPR bool + key string +} + +func loadAllPRs(prs, authorCommits []string) ([]prInfo, []string, error) { + errChan := make(chan error) + wgDone := make(chan bool) + prChan := make(chan req, len(prs)+len(authorCommits)) + // fill the work queue + for _, s := range prs { + prChan <- req{isPR: true, key: s} + } + for _, s := range authorCommits { + prChan <- req{isPR: false, key: s} + } + close(prChan) + + var prInfos []prInfo + var authors []string + fmt.Printf("Found %d merged PRs. 
Loading PR info", len(prs)) + wg := sync.WaitGroup{} + mu := sync.Mutex{} + + shouldLoad := func(in string) bool { + if in == "" { + return false + } + mu.Lock() + defer mu.Unlock() + + for _, existing := range authors { + if existing == in { + return false + } + } + return true + } + addAuthor := func(in string) { + mu.Lock() + defer mu.Unlock() + authors = append(authors, in) + } + addPR := func(in prInfo) { + mu.Lock() + defer mu.Unlock() + prInfos = append(prInfos, in) + } + + for i := 0; i < numberOfThreads; i++ { + wg.Add(1) + go func() { + // load meta data about PRs + defer wg.Done() + + for b := range prChan { + fmt.Print(".") + + if b.isPR { + prInfo, err := loadPRInfo(b.key) + if err != nil { + errChan <- err + break + } + addPR(prInfo) + continue + } + author, err := loadAuthorInfo(b.key) + if err != nil { + errChan <- err + break + } + if shouldLoad(author) { + addAuthor(author) + } + + } + }() + } + + go func() { + // wait for the loading to finish + wg.Wait() + close(wgDone) + }() + + var err error + select { + case <-wgDone: + break + case err = <-errChan: + break + } + + fmt.Println() + + sort.Strings(authors) + + return prInfos, authors, err +} + +func groupPRs(prInfos []prInfo) prsByType { + prPerType := prsByType{} + + for _, info := range prInfos { + var typ, component string + for _, lbl := range info.Labels { + switch { + case strings.HasPrefix(lbl.Name, prefixType): + typ = strings.TrimPrefix(lbl.Name, prefixType) + case strings.HasPrefix(lbl.Name, prefixComponent): + component = strings.TrimPrefix(lbl.Name, prefixComponent) + } + } + switch typ { + case "": + typ = "Other" + case "Bug": + typ = "Bug fixes" + } + + if component == "" { + component = "Other" + } + components, exists := prPerType[typ] + if !exists { + components = prsByComponent{} + prPerType[typ] = components + } + + prsPerComponentAndType := components[component] + components[component] = append(prsPerComponentAndType, info) + } + return prPerType +} + +func 
createSortedPrTypeSlice(prPerType prsByType) []sortedPRType { + var data []sortedPRType + for typeKey, typeElem := range prPerType { + newPrType := sortedPRType{ + Name: typeKey, + } + for componentKey, prInfos := range typeElem { + newComponent := sortedPRComponent{ + Name: componentKey, + PrInfos: prInfos, + } + sort.Slice(newComponent.PrInfos, func(i, j int) bool { + return newComponent.PrInfos[i].Number < newComponent.PrInfos[j].Number + }) + newPrType.Components = append(newPrType.Components, newComponent) + } + sort.Slice(newPrType.Components, func(i, j int) bool { + return newPrType.Components[i].Name < newPrType.Components[j].Name + }) + data = append(data, newPrType) + } + sort.Slice(data, func(i, j int) bool { + return data[i].Name < data[j].Name + }) + return data +} + +func getOutput(fileout string) (*os.File, error) { + if fileout == "" { + return os.Stdout, nil + } + + return os.OpenFile(fileout, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) +} + +func writePrInfos(writeTo *os.File, prPerType prsByType) (err error) { + data := createSortedPrTypeSlice(prPerType) + + t := template.Must(template.New("markdownTemplate").Parse(markdownTemplate)) + err = t.ExecuteTemplate(writeTo, "markdownTemplate", data) + if err != nil { + return err + } + return nil +} + +func main() { + from := flag.String("from", "", "from sha/tag/branch") + to := flag.String("to", "HEAD", "to sha/tag/branch") + fileout := flag.String("file", "", "file on which to write release notes, stdout if empty") + + flag.Parse() + + prs, authorCommits, commits, err := loadMergedPRs(*from, *to) + if err != nil { + log.Fatal(err) + } + + prInfos, authors, err := loadAllPRs(prs, authorCommits) + if err != nil { + log.Fatal(err) + } + + prPerType := groupPRs(prInfos) + out, err := getOutput(*fileout) + if err != nil { + log.Fatal(err) + } + defer func() { + _ = out.Close() + }() + + err = writePrInfos(out, prPerType) + if err != nil { + log.Fatal(err) + } + + _, err = out.WriteString(fmt.Sprintf("\n\nThe 
release includes %d commits (excluding merges)\n", commits)) + if err != nil { + log.Fatal(err) + } + + _, err = out.WriteString(fmt.Sprintf("Thanks to all our contributors: @%s\n", strings.Join(authors, ", @"))) + if err != nil { + log.Fatal(err) + } +} diff --git a/go/tools/release-notes/release_notes_test.go b/go/tools/release-notes/release_notes_test.go new file mode 100644 index 00000000000..b4d358bba46 --- /dev/null +++ b/go/tools/release-notes/release_notes_test.go @@ -0,0 +1,106 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package main + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "vitess.io/vitess/go/test/utils" +) + +func Test_groupPRs(t *testing.T) { + tests := []struct { + name string + prInfos []prInfo + want map[string]map[string][]prInfo + }{ + { + name: "Single PR info with no labels", + prInfos: []prInfo{{Title: "pr 1", Number: 1}}, + want: map[string]map[string][]prInfo{"Other": {"Other": []prInfo{{Title: "pr 1", Number: 1}}}}, + }, { + name: "Single PR info with type label", + prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}, + want: map[string]map[string][]prInfo{"Bug fixes": {"Other": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}}}}}}}, + { + name: "Single PR info with type and component labels", + prInfos: []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}, + want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}}}, + { + name: "Multiple PR infos with type and component labels", prInfos: []prInfo{ + {Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}, + {Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}, + want: map[string]map[string][]prInfo{"Bug fixes": {"VTGate": []prInfo{{Title: "pr 1", Number: 1, Labels: []label{{Name: prefixType + "Bug"}, {Name: prefixComponent + "VTGate"}}}}}, "Feature": {"VTTablet": []prInfo{{Title: "pr 2", Number: 2, Labels: []label{{Name: prefixType + "Feature"}, {Name: prefixComponent + "VTTablet"}}}}}}}, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := groupPRs(tt.prInfos) + utils.MustMatch(t, tt.want, got) + }) + } +} + +func TestParseGitLogOutput(t *testing.T) 
{ + in := `harshTEST@planetscale.com Merge pull request #7968 from planetscale/bump_java_snapshot_v11 7e8ebbb5b79b65d2d45fd6c838efb51bdafc7c0b 195a09df191d3e86a32ebcc7a1f1dde168fe819e 168fe819e +deeptTEST@planetscale.com Merge pull request #7970 from planetscale/vttestserver-default-charset 887be6914690b6d106aba001c72deea80a4d8dab ff8c750eda4b30787e772547a451ed1f50931150 f50931150 +deeptTEST@planetscale.com Merge pull request #7943 from planetscale/fix-mysql80-container-image 01fb7e55ab92df7c3f300b85976fdf3fd5bd35b3 3cc94a10752014c9ce311d88af9e1aa18e7fa2d8 18e7fa2d8 +57520317+rohit-nayak-TEST@users.noreply.github.com Merge pull request #7831 from planetscale/rn-vr-log2 37c09d3be83922a8ef936fbc028a5031f96b7dbf f57350c3ea1720496e5f1cec35d58f069e4df515 69e4df515 +TEST@planetscale.com docker/vttestserver/run.sh: Add $CHARSET environment variable 482a7008117ee3215663aeb33cad981e5242a88a e5242a88a +rohTEST@planetscale.com Add ability to select from vreplication_log in VReplicationExec 427cac89cd6b143d3a1928ee682b3a9538709da5 538709da5 +rohTEST@planetscale.com Use withDDL for vreplication log queries 4a1ab946e3628ba8ef610ea4a158186a5fdd17ba a5fdd17ba +rohTEST@planetscale.com Add license file. Minor refactor fa9de690ce0d27a781befbc1866aca5cd447798f cd447798f +rohTEST@planetscale.com Added comments and refactored tests b6d39acb08939ba56e9e9587f34f3b8bcdcdc504 bcdcdc504 +rohTEST@planetscale.com Add logs for start and end of the copy phase 1cf72866ddfbd554700d6c9e32b9835ebb3b444c ebb3b444c +rohTEST@planetscale.com Fix test 0992d39c6d473b548679d012cfa5a889ffa448ef 9ffa448ef +rohTEST@planetscale.com Add test for vreplication log and fix string conversion bug b616143b14b75e7c23042c2eef4f6b27a275b0f7 7a275b0f7 +rohTEST@planetscale.com Ignore queries related to _vt.vreplication_log in tests e6926932c14da9a2213be246bc2de5f011668551 011668551 +rohTEST@planetscale.com Create log table. Util functions to insert logs. 
Insert logs in VReplicationExec and setMessage/State 37c09d3be83922a8ef936fbc028a5031f96b7dbf 1f96b7dbf +harshTEST@planetscale.com Merge pull request #7951 from vmg/vmg/vr-client-perf 7794c62651066970e1176181cb7000d385d0b327 172fac7dec8b11937a4efb26ebf4bedf1771f189 f1771f189 +alkin.tezuysTEST@gmail.com java: Bump SNAPSHOT version to 11.0.0-SNAPSHOT after Vitess release v10 7794c62651066970e1176181cb7000d385d0b327 385d0b327 +alkin.tezuysTEST@gmail.com Merge pull request #7964 from planetscale/10_0_RC1_release_notes 31d84d6ce8e233a053794ad0ffe5168d34d04450 b020dc71f5c7dc663d814563f1b6c97340f4411f 340f4411f +vTEST@strn.cat vstreamer: fix docs e7bf329da0029414c3b18e18e5cb2226b9a731a2 6b9a731a2 +amasTEST@slack-corp.com [workflow] extract migration targets from wrangler (#7934) 8bd5a7cb093369b50a0926bfa3a112b3b744e782 3b744e782 +alkin.tezuysTEST@gmail.com More spacing issues fixed 7509d47ba785e7a39b8726dc80f93955953ab98d 5953ab98d +alkin.tezuysTEST@gmail.com Minor spacing fixes d31362e76ac69fb2bc4083e22e7c87683099fecd 83099fecd +alkin.tezuysTEST@gmail.com Update 10_0_0_release_notes.md a7034bdf5d454a47738335ed2afc75f72bdbcf37 72bdbcf37 +alkin.tezuysTEST@gmail.com v10 GA Release Notes ad37320b2637620ee36d44d163399ecc2c1eea6c c2c1eea6c +andrTEST@planetscale.com Merge pull request #7912 from planetscale/show-databases-like 7e13d4bccca0325ca07a488334e77c4f2f964f6b 95eceb17d10c62d56f2e94e5478afb5a1b63e1c2 a1b63e1c2 +andrTEST@planetscale.com Merge pull request #7629 from planetscale/gen4-table-aliases 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 7ad14e3f3d26cb1780cdbf9c22029740e5aebde4 0e5aebde4 +andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into show-databases-like 6b3ee1c31a939fc6628515f00087baa3e1e8acf7 2e1b1e9322a6bfcfe792cca341b0d52860d3c66e 860d3c66e +2607934+shlomi-noaTEST@users.noreply.github.com Merge pull request #7959 from Hellcatlk/master 6c826115937d28ef83f05a1f0d54db0fcb814db4 cdab3040aaaa11c51e291d6b1a7af6fadd83dedf add83dedf 
+zouy.fnTEST@cn.fujitsu.com Fix a gofmt warning 08038850a258d6de250cf9d864d6118616f5562c 616f5562c +vTEST@strn.cat mysql: allow reusing row storage when reading from a stream a2850bbf41100618cb1192067b16585ba7c6b0c7 ba7c6b0c7 +vTEST@strn.cat throttle: do not check for time constantly e0b90daebe9e6b98d969934a24899b41d25e3a68 1d25e3a68 +andrTEST@planetscale.com fix compilation error 18036f5fb5f58523dbf50726beb741cedac2baf8 edac2baf8 +andrTEST@planetscale.com better code comment c173c945cf0e75e8649e6fa621509b5fb4ebd6c9 fb4ebd6c9 +vTEST@strn.cat conn: do not let header escape to the heap d31fb23d8cb9463810ed9fc132df4060a6812f6e 0a6812f6e +vTEST@strn.cat vstreamer: do not allocate when filtering rows dafc1cb729d7be7dff2c05bd05a926005eb9a044 05eb9a044 +vTEST@strn.cat vstreamer: do not allocate when converting rows c5cd3067aeb9d952a2f45084c37634267e4f9062 67e4f9062 +andrTEST@planetscale.com Merge remote-tracking branch 'upstream/master' into gen4-table-aliases 8c01827ed8b748240f213d9476ee162306ab01eb b1f9000ddd166d49adda6581e7ca9e0aca10c252 aca10c252 +aquarapTEST@gmail.com Fix mysql80 docker build with dep. 
a28591577b8d432b9c5d78abf59ad494a0a943b0 4a0a943b0 +TEST@planetscale.com Revert "docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24" 7858ff46545cff749b3663c92ae90ef27a5dfbc2 27a5dfbc2 +TEST@planetscale.com docker/lite/install_dependencies.sh: Upgrade MySQL 8 to 8.0.24 c91d46782933292941a846fef2590ff1a6fa193f a6fa193f` + + prs, authorCommits, nonMergeCommits, err := parseGitLog(in) + require.NoError(t, err) + assert.Equal(t, prs, []string{"7629", "7831", "7912", "7934", "7943", "7951", "7959", "7964", "7968", "7970"}) + assert.Equal(t, authorCommits, []string{"385d0b327", "3b744e782", "4a0a943b0", "538709da5", "616f5562c", "6b9a731a2", "e5242a88a", "edac2baf8"}) + assert.Equal(t, 28, nonMergeCommits) +} diff --git a/go/tools/sizegen/sizegen.go b/go/tools/sizegen/sizegen.go index c108f44fa39..44824fbcc8d 100644 --- a/go/tools/sizegen/sizegen.go +++ b/go/tools/sizegen/sizegen.go @@ -27,6 +27,8 @@ import ( "sort" "strings" + "vitess.io/vitess/go/tools/common" + "github.com/dave/jennifer/jen" "golang.org/x/tools/go/packages" ) @@ -507,6 +509,10 @@ func GenerateSizeHelpers(packagePatterns []string, typePatterns []string) (map[s return nil, err } + if common.PkgFailed(loaded) { + return nil, fmt.Errorf("failed to load packages") + } + sizegen := newSizegen(loaded[0].Module, loaded[0].TypesSizes) scopes := make(map[string]*types.Scope) diff --git a/go/trace/logger.go b/go/trace/logger.go new file mode 100644 index 00000000000..158fab3c8b8 --- /dev/null +++ b/go/trace/logger.go @@ -0,0 +1,32 @@ +/* +Copyright 2021 The Vitess Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package trace + +import "vitess.io/vitess/go/vt/log" + +// traceLogger wraps the standard vitess log package to satisfy the datadog and +// jaeger logger interfaces. +type traceLogger struct{} + +// Log is part of the ddtrace.Logger interface. Datadog only ever logs errors. +func (*traceLogger) Log(msg string) { log.Errorf(msg) } + +// Error is part of the jaeger.Logger interface. +func (*traceLogger) Error(msg string) { log.Errorf(msg) } + +// Infof is part of the jaeger.Logger interface. +func (*traceLogger) Infof(msg string, args ...interface{}) { log.Infof(msg, args...) } diff --git a/go/trace/plugin_datadog.go b/go/trace/plugin_datadog.go index 87809d9bc50..f222af1cbeb 100644 --- a/go/trace/plugin_datadog.go +++ b/go/trace/plugin_datadog.go @@ -20,12 +20,18 @@ func newDatadogTracer(serviceName string) (tracingService, io.Closer, error) { return nil, nil, fmt.Errorf("need host and port to datadog agent to use datadog tracing") } - t := opentracer.New( - ddtracer.WithAgentAddr(*dataDogHost+":"+*dataDogPort), + opts := []ddtracer.StartOption{ + ddtracer.WithAgentAddr(*dataDogHost + ":" + *dataDogPort), ddtracer.WithServiceName(serviceName), ddtracer.WithDebugMode(true), - ddtracer.WithSampler(ddtracer.NewRateSampler(*samplingRate)), - ) + ddtracer.WithSampler(ddtracer.NewRateSampler(samplingRate.Get())), + } + + if *enableLogging { + opts = append(opts, ddtracer.WithLogger(&traceLogger{})) + } + + t := opentracer.New(opts...) 
opentracing.SetGlobalTracer(t) diff --git a/go/trace/plugin_jaeger.go b/go/trace/plugin_jaeger.go index 587f3ac5233..38781ed8d5b 100644 --- a/go/trace/plugin_jaeger.go +++ b/go/trace/plugin_jaeger.go @@ -19,11 +19,13 @@ package trace import ( "flag" "io" + "os" "github.com/opentracing/opentracing-go" "github.com/uber/jaeger-client-go" "github.com/uber/jaeger-client-go/config" + "vitess.io/vitess/go/flagutil" "vitess.io/vitess/go/vt/log" ) @@ -35,9 +37,15 @@ included but nothing Jaeger specific. var ( agentHost = flag.String("jaeger-agent-host", "", "host and port to send spans to. if empty, no tracing will be done") - samplingRate = flag.Float64("tracing-sampling-rate", 0.1, "sampling rate for the probabilistic jaeger sampler") + samplingType = flagutil.NewOptionalString("const") + samplingRate = flagutil.NewOptionalFloat64(0.1) ) +func init() { + flag.Var(samplingType, "tracing-sampling-type", "sampling strategy to use for jaeger. possible values are 'const', 'probabilistic', 'rateLimiting', or 'remote'") + flag.Var(samplingRate, "tracing-sampling-rate", "sampling rate for the probabilistic jaeger sampler") +} + // newJagerTracerFromEnv will instantiate a tracingService implemented by Jaeger, // taking configuration from environment variables. Available properties are: // JAEGER_SERVICE_NAME -- If this is set, the service name used in code will be ignored and this value used instead @@ -70,13 +78,34 @@ func newJagerTracerFromEnv(serviceName string) (tracingService, io.Closer, error cfg.Reporter.LocalAgentHostPort = *agentHost } log.Infof("Tracing to: %v as %v", cfg.Reporter.LocalAgentHostPort, cfg.ServiceName) - cfg.Sampler = &config.SamplerConfig{ - Type: jaeger.SamplerTypeConst, - Param: *samplingRate, + + if os.Getenv("JAEGER_SAMPLER_PARAM") == "" { + // If the environment variable was not set, we take the flag regardless + // of whether it was explicitly set on the command line. 
+ cfg.Sampler.Param = samplingRate.Get() + } else if samplingRate.IsSet() { + // If the environment variable was set, but the user also explicitly + // passed the command line flag, the flag takes precedence. + cfg.Sampler.Param = samplingRate.Get() + } + + if samplingType.IsSet() { + cfg.Sampler.Type = samplingType.Get() + } else if cfg.Sampler.Type == "" { + log.Infof("-tracing-sampler-type was not set, and JAEGER_SAMPLER_TYPE was not set, defaulting to const sampler") + cfg.Sampler.Type = jaeger.SamplerTypeConst + } + + log.Infof("Tracing sampler type %v (param: %v)", cfg.Sampler.Type, cfg.Sampler.Param) + + var opts []config.Option + if *enableLogging { + opts = append(opts, config.Logger(&traceLogger{})) + } else if cfg.Reporter.LogSpans { + log.Warningf("JAEGER_REPORTER_LOG_SPANS was set, but -tracing-enable-logging was not; spans will not be logged") } - log.Infof("Tracing sampling rate: %v", *samplingRate) - tracer, closer, err := cfg.NewTracer() + tracer, closer, err := cfg.NewTracer(opts...) if err != nil { return nil, &nilCloser{}, err diff --git a/go/trace/trace.go b/go/trace/trace.go index 0038051468a..181d3964e57 100644 --- a/go/trace/trace.go +++ b/go/trace/trace.go @@ -133,6 +133,7 @@ var currentTracer tracingService = noopTracingServer{} var ( tracingServer = flag.String("tracer", "noop", "tracing service to use") + enableLogging = flag.Bool("tracing-enable-logging", false, "whether to enable logging in the tracing service") ) // StartTracing enables tracing for a named service diff --git a/go/vt/automation/cluster_operation_instance.go b/go/vt/automation/cluster_operation_instance.go index 4cdfb63e620..e7ff59d283e 100644 --- a/go/vt/automation/cluster_operation_instance.go +++ b/go/vt/automation/cluster_operation_instance.go @@ -17,7 +17,7 @@ limitations under the License. 
package automation import ( - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" automationpb "vitess.io/vitess/go/vt/proto/automation" ) @@ -26,14 +26,14 @@ import ( // Unlike the protobuf message, the additional runtime data will not be part of a checkpoint. // Methods of this struct are not thread-safe. type ClusterOperationInstance struct { - automationpb.ClusterOperation + *automationpb.ClusterOperation taskIDGenerator *IDGenerator } // NewClusterOperationInstance creates a new cluster operation instance with one initial task. func NewClusterOperationInstance(clusterOpID string, initialTask *automationpb.TaskContainer, taskIDGenerator *IDGenerator) ClusterOperationInstance { c := ClusterOperationInstance{ - automationpb.ClusterOperation{ + &automationpb.ClusterOperation{ Id: clusterOpID, SerialTasks: []*automationpb.TaskContainer{}, State: automationpb.ClusterOperationState_CLUSTER_OPERATION_NOT_STARTED, @@ -59,6 +59,6 @@ func (c *ClusterOperationInstance) InsertTaskContainers(newTaskContainers []*aut // Other elements e.g. taskIDGenerator are not deep-copied. 
func (c ClusterOperationInstance) Clone() ClusterOperationInstance { var clone = c - clone.ClusterOperation = *(proto.Clone(&c.ClusterOperation).(*automationpb.ClusterOperation)) + clone.ClusterOperation = proto.Clone(c.ClusterOperation).(*automationpb.ClusterOperation) return clone } diff --git a/go/vt/automation/horizontal_resharding_task_test.go b/go/vt/automation/horizontal_resharding_task_test.go index 589b4811624..6a8eb44401f 100644 --- a/go/vt/automation/horizontal_resharding_task_test.go +++ b/go/vt/automation/horizontal_resharding_task_test.go @@ -19,7 +19,7 @@ package automation import ( "testing" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/encoding/prototext" ) func TestHorizontalReshardingTaskEmittedTasks(t *testing.T) { @@ -46,6 +46,6 @@ func TestHorizontalReshardingTaskEmittedTasks(t *testing.T) { // TODO(mberlin): Check emitted tasks against expected output. for _, tc := range newTaskContainers { - t.Logf("new tasks: %v", proto.MarshalTextString(tc)) + t.Logf("new tasks: %v", prototext.Format(tc)) } } diff --git a/go/vt/automation/scheduler.go b/go/vt/automation/scheduler.go index c2267d72e38..345f35ec341 100644 --- a/go/vt/automation/scheduler.go +++ b/go/vt/automation/scheduler.go @@ -26,6 +26,8 @@ import ( "fmt" "sync" + "vitess.io/vitess/go/vt/proto/automationservice" + "context" "vitess.io/vitess/go/vt/log" @@ -46,6 +48,8 @@ type taskCreator func(string) Task // Scheduler executes automation tasks and maintains the execution state. 
type Scheduler struct { + automationservice.UnimplementedAutomationServer + idGenerator IDGenerator mu sync.Mutex @@ -370,7 +374,7 @@ func (s *Scheduler) GetClusterOperationDetails(ctx context.Context, req *automat return nil, err } return &automationpb.GetClusterOperationDetailsResponse{ - ClusterOp: &clusterOp.ClusterOperation, + ClusterOp: clusterOp.ClusterOperation, }, nil } diff --git a/go/vt/automation/scheduler_test.go b/go/vt/automation/scheduler_test.go index 6808748ebb9..d8cc881a6bd 100644 --- a/go/vt/automation/scheduler_test.go +++ b/go/vt/automation/scheduler_test.go @@ -21,9 +21,9 @@ import ( "testing" "time" - context "context" + "google.golang.org/protobuf/encoding/prototext" - "github.com/golang/protobuf/proto" + context "context" automationpb "vitess.io/vitess/go/vt/proto/automation" ) @@ -90,7 +90,8 @@ func waitForClusterOperation(t *testing.T, scheduler *Scheduler, id string, expe } if expectedOutputLastTask != "" { if got := lastTc.ParallelTasks[len(lastTc.ParallelTasks)-1].Output; !strings.Contains(got, expectedOutputLastTask) { - t.Fatalf("ClusterOperation finished but did not contain expected output. got: %v want: %v Full ClusterOperation details: %v", got, expectedOutputLastTask, proto.MarshalTextString(getDetailsResponse.ClusterOp)) + protoText, _ := prototext.Marshal(getDetailsResponse.ClusterOp) + t.Fatalf("ClusterOperation finished but did not contain expected output. got: %v want: %v Full ClusterOperation details: %s", got, expectedOutputLastTask, protoText) } } if expectedErrorLastTask != "" { @@ -262,6 +263,7 @@ func TestTaskEmitsTaskWhichCannotBeInstantiated(t *testing.T) { details := waitForClusterOperation(t, scheduler, enqueueResponse.Id, "emitted TestingEchoTask", "no implementation found for: TestingEchoTask") if len(details.SerialTasks) != 1 { - t.Errorf("A task has been emitted, but it shouldn't. 
Details:\n%v", proto.MarshalTextString(details)) + protoText, _ := prototext.Marshal(details) + t.Errorf("A task has been emitted, but it shouldn't. Details:\n%s", protoText) } } diff --git a/go/vt/binlog/binlog_connection.go b/go/vt/binlog/binlog_connection.go index 09d5984c921..ded68cfb27f 100644 --- a/go/vt/binlog/binlog_connection.go +++ b/go/vt/binlog/binlog_connection.go @@ -100,20 +100,21 @@ func connectForReplication(cp dbconfigs.Connector) (*mysql.Conn, error) { func (bc *BinlogConnection) StartBinlogDumpFromCurrent(ctx context.Context) (mysql.Position, <-chan mysql.BinlogEvent, error) { ctx, bc.cancel = context.WithCancel(ctx) - masterPosition, err := bc.Conn.MasterPosition() + position, err := bc.Conn.PrimaryPosition() if err != nil { - return mysql.Position{}, nil, fmt.Errorf("failed to get master position: %v", err) + return mysql.Position{}, nil, fmt.Errorf("failed to get primary position: %v", err) } - c, err := bc.StartBinlogDumpFromPosition(ctx, masterPosition) - return masterPosition, c, err + c, err := bc.StartBinlogDumpFromPosition(ctx, position) + return position, c, err } // StartBinlogDumpFromPosition requests a replication binlog dump from -// the master mysqld at the given Position and then sends binlog +// the replication source mysqld (typically the primary server in the cluster) +// at the given Position and then sends binlog // events to the provided channel. // The stream will continue in the background, waiting for new events if -// necessary, until the connection is closed, either by the master or +// necessary, until the connection is closed, either by the replication source or // by canceling the context. // // Note the context is valid and used until eventChan is closed. 
@@ -166,7 +167,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogE } // StartBinlogDumpFromBinlogBeforeTimestamp requests a replication -// binlog dump from the master mysqld starting with a file that has +// binlog dump from the source mysqld starting with a file that has // timestamps smaller than the provided timestamp, and then sends // binlog events to the provided channel. // @@ -189,7 +190,7 @@ func (bc *BinlogConnection) streamEvents(ctx context.Context) chan mysql.BinlogE // given range. // // The stream will continue in the background, waiting for new events if -// necessary, until the connection is closed, either by the master or +// necessary, until the connection is closed, either by the source or // by canceling the context. // // Note the context is valid and used until eventChan is closed. diff --git a/go/vt/binlog/binlog_streamer.go b/go/vt/binlog/binlog_streamer.go index aebd9d05a3e..986b1bfdf3e 100644 --- a/go/vt/binlog/binlog_streamer.go +++ b/go/vt/binlog/binlog_streamer.go @@ -22,9 +22,9 @@ import ( "io" "strings" - "context" + "google.golang.org/protobuf/proto" - "github.com/golang/protobuf/proto" + "context" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -200,7 +200,7 @@ func (bls *Streamer) Stream(ctx context.Context) (err error) { if err != nil { return fmt.Errorf("can't get charset to check binlog stream: %v", err) } - log.Infof("binlog stream client charset = %v, server charset = %v", *bls.clientCharset, cs) + log.Infof("binlog stream client charset = %v, server charset = %v", bls.clientCharset, cs) if !proto.Equal(cs, bls.clientCharset) { return fmt.Errorf("binlog stream client charset (%v) doesn't match server (%v)", bls.clientCharset, cs) } diff --git a/go/vt/binlog/binlog_streamer_test.go b/go/vt/binlog/binlog_streamer_test.go index df518582084..3da7e52c25a 100644 --- a/go/vt/binlog/binlog_streamer_test.go +++ b/go/vt/binlog/binlog_streamer_test.go @@ -23,9 +23,9 @@ import ( "testing" 
"time" - "context" + "google.golang.org/protobuf/proto" - "github.com/golang/protobuf/proto" + "context" "vitess.io/vitess/go/mysql" @@ -40,7 +40,7 @@ type fullBinlogTransaction struct { statements []FullBinlogStatement } -type binlogStatements []binlogdatapb.BinlogTransaction +type binlogStatements []*binlogdatapb.BinlogTransaction func (bs *binlogStatements) sendTransaction(eventToken *querypb.EventToken, statements []FullBinlogStatement) error { var s []*binlogdatapb.BinlogTransaction_Statement @@ -50,19 +50,19 @@ func (bs *binlogStatements) sendTransaction(eventToken *querypb.EventToken, stat s[i] = statement.Statement } } - *bs = append(*bs, binlogdatapb.BinlogTransaction{ + *bs = append(*bs, &binlogdatapb.BinlogTransaction{ Statements: s, EventToken: eventToken, }) return nil } -func (bs *binlogStatements) equal(bts []binlogdatapb.BinlogTransaction) bool { +func (bs *binlogStatements) equal(bts []*binlogdatapb.BinlogTransaction) bool { if len(*bs) != len(bts) { return false } for i, s := range *bs { - if !proto.Equal(&s, &bts[i]) { + if !proto.Equal(s, bts[i]) { return false } } @@ -96,7 +96,7 @@ func TestStreamerParseEventsXID(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, @@ -159,7 +159,7 @@ func TestStreamerParseEventsCommit(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, @@ -578,7 +578,7 @@ func TestStreamerParseEventsRollback(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := 
[]*binlogdatapb.BinlogTransaction{ { Statements: nil, EventToken: &querypb.EventToken{ @@ -649,7 +649,7 @@ func TestStreamerParseEventsDMLWithoutBegin(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, @@ -724,7 +724,7 @@ func TestStreamerParseEventsBeginWithoutCommit(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, @@ -800,7 +800,7 @@ func TestStreamerParseEventsSetInsertID(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET INSERT_ID=101")}, @@ -905,7 +905,7 @@ func TestStreamerParseEventsOtherDB(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, @@ -967,7 +967,7 @@ func TestStreamerParseEventsOtherDBBegin(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Sql: []byte("SET TIMESTAMP=1407805592")}, @@ -1069,7 +1069,7 @@ func TestStreamerParseEventsMariadbBeginGTID(t *testing.T) { 
events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ { @@ -1136,7 +1136,7 @@ func TestStreamerParseEventsMariadbStandaloneGTID(t *testing.T) { events := make(chan mysql.BinlogEvent) - want := []binlogdatapb.BinlogTransaction{ + want := []*binlogdatapb.BinlogTransaction{ { Statements: []*binlogdatapb.BinlogTransaction_Statement{ {Category: binlogdatapb.BinlogTransaction_Statement_BL_SET, Charset: &binlogdatapb.Charset{Client: 8, Conn: 8, Server: 33}, Sql: []byte("SET TIMESTAMP=1409892744")}, diff --git a/go/vt/binlog/binlogplayer/binlog_player.go b/go/vt/binlog/binlogplayer/binlog_player.go index 42142cd08d6..bde74f0690f 100644 --- a/go/vt/binlog/binlogplayer/binlog_player.go +++ b/go/vt/binlog/binlogplayer/binlog_player.go @@ -21,18 +21,21 @@ package binlogplayer import ( "bytes" + "compress/zlib" + "encoding/binary" "encoding/hex" "fmt" + "io" "math" "sync" "time" + "google.golang.org/protobuf/proto" + "vitess.io/vitess/go/vt/vtgate/evalengine" "context" - "github.com/golang/protobuf/proto" - "vitess.io/vitess/go/history" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" @@ -420,10 +423,10 @@ func (blp *BinlogPlayer) processTransaction(tx *binlogdatapb.BinlogTransaction) // needed during event playback. Here we also adjust so that playback // proceeds, but in Vitess-land this usually means a misconfigured // server or a misbehaving client, so we spam the logs with warnings. 
- log.Warningf("BinlogPlayer changing charset from %v to %v for statement %d in transaction %v", blp.currentCharset, stmtCharset, i, *tx) + log.Warningf("BinlogPlayer changing charset from %v to %v for statement %d in transaction %v", blp.currentCharset, stmtCharset, i, tx) err = mysql.SetCharset(dbClient.dbConn, stmtCharset) if err != nil { - return false, fmt.Errorf("can't set charset for statement %d in transaction %v: %v", i, *tx, err) + return false, fmt.Errorf("can't set charset for statement %d in transaction %v: %v", i, tx, err) } blp.currentCharset = stmtCharset } @@ -475,13 +478,13 @@ func (blp *BinlogPlayer) exec(sql string) (*sqltypes.Result, error) { // transaction_timestamp alone (keeping the old value), and we don't // change SecondsBehindMaster func (blp *BinlogPlayer) writeRecoveryPosition(tx *binlogdatapb.BinlogTransaction) error { - position, err := mysql.DecodePosition(tx.EventToken.Position) + position, err := DecodePosition(tx.EventToken.Position) if err != nil { return err } now := time.Now().Unix() - updateRecovery := GenerateUpdatePos(blp.uid, position, now, tx.EventToken.Timestamp) + updateRecovery := GenerateUpdatePos(blp.uid, position, now, tx.EventToken.Timestamp, blp.blplStats.CopyRowCount.Get(), false) qr, err := blp.exec(updateRecovery) if err != nil { @@ -559,6 +562,13 @@ var AlterVReplicationTable = []string{ "ALTER TABLE _vt.vreplication ADD COLUMN db_name VARBINARY(255) NOT NULL", "ALTER TABLE _vt.vreplication MODIFY source BLOB NOT NULL", "ALTER TABLE _vt.vreplication ADD KEY workflow_idx (workflow(64))", + "ALTER TABLE _vt.vreplication ADD COLUMN rows_copied BIGINT(20) NOT NULL DEFAULT 0", +} + +// WithDDLInitialQueries contains the queries to be expected by the mock db client during tests +var WithDDLInitialQueries = []string{ + "SELECT db_name FROM _vt.vreplication LIMIT 0", + "SELECT rows_copied FROM _vt.vreplication LIMIT 0", } // VRSettings contains the settings of a vreplication table. 
@@ -592,7 +602,7 @@ func ReadVRSettings(dbClient DBClient, uid uint32) (VRSettings, error) { if err != nil { return VRSettings{}, fmt.Errorf("failed to parse max_replication_lag column: %v", err) } - startPos, err := mysql.DecodePosition(vrRow[0].ToString()) + startPos, err := DecodePosition(vrRow[0].ToString()) if err != nil { return VRSettings{}, fmt.Errorf("failed to parse pos column: %v", err) } @@ -629,16 +639,23 @@ func CreateVReplicationState(workflow string, source *binlogdatapb.BinlogSource, // GenerateUpdatePos returns a statement to update a value in the // _vt.vreplication table. -func GenerateUpdatePos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64) string { +func GenerateUpdatePos(uid uint32, pos mysql.Position, timeUpdated int64, txTimestamp int64, rowsCopied int64, compress bool) string { + strGTID := encodeString(mysql.EncodePosition(pos)) + if compress { + strGTID = fmt.Sprintf("compress(%s)", strGTID) + } if txTimestamp != 0 { return fmt.Sprintf( - "update _vt.vreplication set pos=%v, time_updated=%v, transaction_timestamp=%v, message='' where id=%v", - encodeString(mysql.EncodePosition(pos)), timeUpdated, txTimestamp, uid) + "update _vt.vreplication set pos=%v, time_updated=%v, transaction_timestamp=%v, rows_copied=%v, message='' where id=%v", + strGTID, timeUpdated, txTimestamp, rowsCopied, uid) } - return fmt.Sprintf( - "update _vt.vreplication set pos=%v, time_updated=%v, message='' where id=%v", - encodeString(mysql.EncodePosition(pos)), timeUpdated, uid) + "update _vt.vreplication set pos=%v, time_updated=%v, rows_copied=%v, message='' where id=%v", strGTID, timeUpdated, rowsCopied, uid) +} + +// GenerateUpdateRowsCopied returns a statement to update the rows_copied value in the _vt.vreplication table. 
+func GenerateUpdateRowsCopied(uid uint32, rowsCopied int64) string { + return fmt.Sprintf("update _vt.vreplication set rows_copied=%v where id=%v", rowsCopied, uid) } // GenerateUpdateTime returns a statement to update time_updated in the _vt.vreplication table. @@ -678,10 +695,7 @@ func DeleteVReplication(uid uint32) string { // MessageTruncate truncates the message string to a safe length. func MessageTruncate(msg string) string { // message length is 1000 bytes. - if len(msg) > 950 { - return msg[:950] + "..." - } - return msg + return LimitString(msg, 950) } func encodeString(in string) string { @@ -702,6 +716,49 @@ func ReadVReplicationStatus(index uint32) string { return fmt.Sprintf("select pos, state, message from _vt.vreplication where id=%v", index) } +// MysqlUncompress will uncompress a binary string in the format stored by mysql's compress() function +// The first four bytes represent the size of the original string passed to compress() +// Remaining part is the compressed string using zlib, which we uncompress here using golang's zlib library +func MysqlUncompress(input string) []byte { + // consistency check + inputBytes := []byte(input) + if len(inputBytes) < 5 { + return nil + } + + // determine length + dataLength := uint32(inputBytes[0]) + uint32(inputBytes[1])<<8 + uint32(inputBytes[2])<<16 + uint32(inputBytes[3])<<24 + dataLengthBytes := make([]byte, 4) + binary.LittleEndian.PutUint32(dataLengthBytes, dataLength) + dataLength = binary.LittleEndian.Uint32(dataLengthBytes) + + // uncompress using zlib + inputData := inputBytes[4:] + inputDataBuf := bytes.NewBuffer(inputData) + reader, err := zlib.NewReader(inputDataBuf) + if err != nil { + return nil + } + var outputBytes bytes.Buffer + io.Copy(&outputBytes, reader) + if outputBytes.Len() == 0 { + return nil + } + if dataLength != uint32(outputBytes.Len()) { // double check that the stored and uncompressed lengths match + return nil + } + return outputBytes.Bytes() +} + +// DecodePosition 
attempts to uncompress the passed value first and if it fails tries to decode it as a valid GTID +func DecodePosition(gtid string) (mysql.Position, error) { + b := MysqlUncompress(gtid) + if b != nil { + gtid = string(b) + } + return mysql.DecodePosition(gtid) +} + // StatsHistoryRecord is used to store a Message with timestamp type StatsHistoryRecord struct { Time time.Time diff --git a/go/vt/binlog/binlogplayer/binlog_player_test.go b/go/vt/binlog/binlogplayer/binlog_player_test.go index df3000862eb..d96b199b8c8 100644 --- a/go/vt/binlog/binlogplayer/binlog_player_test.go +++ b/go/vt/binlog/binlogplayer/binlog_player_test.go @@ -322,7 +322,7 @@ func applyEvents(blp *BinlogPlayer) func() error { func TestCreateVReplicationKeyRange(t *testing.T) { want := "insert into _vt.vreplication " + "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name) " + - `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" key_range: ', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db')` + `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" key_range:{end:\"\\x80\"}', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db')` bls := binlogdatapb.BinlogSource{ Keyspace: "ks", @@ -341,7 +341,7 @@ func TestCreateVReplicationKeyRange(t *testing.T) { func TestCreateVReplicationTables(t *testing.T) { want := "insert into _vt.vreplication " + "(workflow, source, pos, max_tps, max_replication_lag, time_updated, transaction_timestamp, state, db_name) " + - `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" tables:\"a\" tables:\"b\" ', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db')` + `values ('Resharding', 'keyspace:\"ks\" shard:\"0\" tables:\"a\" tables:\"b\"', 'MariaDB/0-1-1083', 9223372036854775807, 9223372036854775807, 481823, 0, 'Running', 'db')` bls := binlogdatapb.BinlogSource{ Keyspace: "ks", @@ -358,10 +358,10 @@ func 
TestCreateVReplicationTables(t *testing.T) { func TestUpdateVReplicationPos(t *testing.T) { gtid := mysql.MustParseGTID("MariaDB", "0-1-8283") want := "update _vt.vreplication " + - "set pos='MariaDB/0-1-8283', time_updated=88822, message='' " + + "set pos='MariaDB/0-1-8283', time_updated=88822, rows_copied=0, message='' " + "where id=78522" - got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0) + got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 0, 0, false) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } @@ -370,10 +370,10 @@ func TestUpdateVReplicationPos(t *testing.T) { func TestUpdateVReplicationTimestamp(t *testing.T) { gtid := mysql.MustParseGTID("MariaDB", "0-2-582") want := "update _vt.vreplication " + - "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828, message='' " + + "set pos='MariaDB/0-2-582', time_updated=88822, transaction_timestamp=481828, rows_copied=0, message='' " + "where id=78522" - got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828) + got := GenerateUpdatePos(78522, mysql.Position{GTIDSet: gtid.GTIDSet()}, 88822, 481828, 0, false) if got != want { t.Errorf("updateVReplicationPos() = %#v, want %#v", got, want) } diff --git a/go/vt/binlog/binlogplayer/dbclient.go b/go/vt/binlog/binlogplayer/dbclient.go index 7dfeb36f2a4..5ad73b686a6 100644 --- a/go/vt/binlog/binlogplayer/dbclient.go +++ b/go/vt/binlog/binlogplayer/dbclient.go @@ -74,7 +74,7 @@ func (dc *dbClientImpl) Connect() error { func (dc *dbClientImpl) Begin() error { _, err := dc.dbConn.ExecuteFetch("begin", 1, false) if err != nil { - log.Errorf("BEGIN failed w/ error %v", err) + LogError("BEGIN failed w/ error", err) dc.handleError(err) } return err @@ -83,7 +83,7 @@ func (dc *dbClientImpl) Begin() error { func (dc *dbClientImpl) Commit() error { _, err := dc.dbConn.ExecuteFetch("commit", 1, false) if err != nil { - 
log.Errorf("COMMIT failed w/ error %v", err) + LogError("COMMIT failed w/ error", err) dc.dbConn.Close() } return err @@ -92,7 +92,7 @@ func (dc *dbClientImpl) Commit() error { func (dc *dbClientImpl) Rollback() error { _, err := dc.dbConn.ExecuteFetch("rollback", 1, false) if err != nil { - log.Errorf("ROLLBACK failed w/ error %v", err) + LogError("ROLLBACK failed w/ error", err) dc.dbConn.Close() } return err @@ -102,10 +102,22 @@ func (dc *dbClientImpl) Close() { dc.dbConn.Close() } +// LogError logs a message after truncating it to avoid spamming logs +func LogError(msg string, err error) { + log.Errorf("%s: %s", msg, MessageTruncate(err.Error())) +} + +// LimitString truncates string to specified size +func LimitString(s string, limit int) string { + if len(s) > limit { + return s[:limit] + } + return s +} + func (dc *dbClientImpl) ExecuteFetch(query string, maxrows int) (*sqltypes.Result, error) { mqr, err := dc.dbConn.ExecuteFetch(query, maxrows, true) if err != nil { - log.Errorf("ExecuteFetch failed w/ error %v", err) dc.handleError(err) return nil, err } diff --git a/go/vt/binlog/binlogplayer/fake_dbclient.go b/go/vt/binlog/binlogplayer/fake_dbclient.go index 3b794dafd72..186722cf12f 100644 --- a/go/vt/binlog/binlogplayer/fake_dbclient.go +++ b/go/vt/binlog/binlogplayer/fake_dbclient.go @@ -68,10 +68,10 @@ func (dc *fakeDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re if strings.Contains(query, "where") { return sqltypes.MakeTestResult( sqltypes.MakeTestFields( - "id|state|source", - "int64|varchar|varchar", + "id|state|source|message", + "int64|varchar|varchar|varchar", ), - `1|Running|keyspace:"ks" shard:"0" key_range: `, + `1|Running|keyspace:"ks" shard:"0" key_range: |`, ), nil } return &sqltypes.Result{}, nil diff --git a/go/vt/binlog/binlogplayer/framework_test.go b/go/vt/binlog/binlogplayer/framework_test.go index e5fe5b40742..228d95e991e 100644 --- a/go/vt/binlog/binlogplayer/framework_test.go +++ 
b/go/vt/binlog/binlogplayer/framework_test.go @@ -25,7 +25,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" diff --git a/go/vt/binlog/binlogplayer/mock_dbclient.go b/go/vt/binlog/binlogplayer/mock_dbclient.go index 86d4200f6c3..140cbcfff61 100644 --- a/go/vt/binlog/binlogplayer/mock_dbclient.go +++ b/go/vt/binlog/binlogplayer/mock_dbclient.go @@ -31,12 +31,13 @@ const mockClientUNameDba = "Dba" // MockDBClient mocks a DBClient. // It must be configured to expect requests in a specific order. type MockDBClient struct { - t *testing.T - UName string - expect []*mockExpect - currentResult int - done chan struct{} - invariants map[string]*sqltypes.Result + t *testing.T + UName string + expect []*mockExpect + currentResult int + done chan struct{} + queriesToIgnore []*mockExpect // these queries will return a standard nil result, you SHOULD NOT expect them in the tests + invariants map[string]*sqltypes.Result } type mockExpect struct { @@ -46,12 +47,28 @@ type mockExpect struct { err error } +func getQueriesToIgnore() []*mockExpect { + var queriesToIgnore []*mockExpect + for _, query := range WithDDLInitialQueries { + exp := &mockExpect{ + query: query, + re: nil, + result: &sqltypes.Result{}, + err: nil, + } + queriesToIgnore = append(queriesToIgnore, exp) + + } + return queriesToIgnore +} + // NewMockDBClient returns a new DBClientMock with the default "Filtered" UName. 
func NewMockDBClient(t *testing.T) *MockDBClient { return &MockDBClient{ - t: t, - UName: mockClientUNameFiltered, - done: make(chan struct{}), + t: t, + UName: mockClientUNameFiltered, + done: make(chan struct{}), + queriesToIgnore: getQueriesToIgnore(), invariants: map[string]*sqltypes.Result{ "CREATE TABLE IF NOT EXISTS _vt.vreplication_log": {}, "select id, type, state, message from _vt.vreplication_log": {}, @@ -60,11 +77,13 @@ func NewMockDBClient(t *testing.T) *MockDBClient { } } +// NewMockDbaClient returns a new DBClientMock with the default "Dba" UName. func NewMockDbaClient(t *testing.T) *MockDBClient { return &MockDBClient{ - t: t, - UName: mockClientUNameDba, - done: make(chan struct{}), + t: t, + UName: mockClientUNameDba, + done: make(chan struct{}), + queriesToIgnore: getQueriesToIgnore(), } } @@ -150,6 +169,11 @@ func (dc *MockDBClient) ExecuteFetch(query string, maxrows int) (qr *sqltypes.Re dc.t.Helper() dc.t.Logf("DBClient query: %v", query) + for _, q := range dc.queriesToIgnore { + if strings.EqualFold(q.query, query) || strings.Contains(strings.ToLower(query), strings.ToLower(q.query)) { + return q.result, q.err + } + } for q, result := range dc.invariants { if strings.Contains(query, q) { return result, nil diff --git a/go/vt/binlog/binlogplayertest/player.go b/go/vt/binlog/binlogplayertest/player.go index 2969eea96d3..bfbd234f939 100644 --- a/go/vt/binlog/binlogplayertest/player.go +++ b/go/vt/binlog/binlogplayertest/player.go @@ -24,7 +24,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/binlog/binlogplayer" "vitess.io/vitess/go/vt/key" @@ -126,7 +126,7 @@ func testStreamKeyRange(t *testing.T, bpc binlogplayer.Client) { t.Fatalf("got error: %v", err) } else { if !proto.Equal(se, testBinlogTransaction) { - t.Errorf("got wrong result, got %v expected %v", *se, *testBinlogTransaction) + t.Errorf("got wrong result, got %v expected %v", se, testBinlogTransaction) } } if se, 
err := stream.Recv(); err == nil { @@ -192,7 +192,7 @@ func testStreamTables(t *testing.T, bpc binlogplayer.Client) { t.Fatalf("got error: %v", err) } else { if !proto.Equal(se, testBinlogTransaction) { - t.Errorf("got wrong result, got %v expected %v", *se, *testBinlogTransaction) + t.Errorf("got wrong result, got %v expected %v", se, testBinlogTransaction) } } if se, err := stream.Recv(); err == nil { diff --git a/go/vt/binlog/event_streamer_test.go b/go/vt/binlog/event_streamer_test.go index 62a329ad0dd..38e50240d1c 100644 --- a/go/vt/binlog/event_streamer_test.go +++ b/go/vt/binlog/event_streamer_test.go @@ -17,11 +17,12 @@ limitations under the License. package binlog import ( - "fmt" "testing" - "github.com/golang/protobuf/proto" + "vitess.io/vitess/go/test/utils" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" binlogdatapb "vitess.io/vitess/go/vt/proto/binlogdata" querypb "vitess.io/vitess/go/vt/proto/query" @@ -134,27 +135,18 @@ func TestDMLEvent(t *testing.T) { for _, statement := range event.Statements { switch statement.Category { case querypb.StreamEvent_Statement_DML: - want := `category:DML table_name:"_table_" primary_key_fields: primary_key_fields: primary_key_fields: primary_key_values: primary_key_values: ` - got := fmt.Sprintf("%v", statement) - if got != want { - t.Errorf("got \n%s, want \n%s", got, want) - } + want := `category:DML table_name:"_table_" primary_key_fields:{name:"eid" type:INT64} primary_key_fields:{name:"id" type:UINT64} primary_key_fields:{name:"name" type:VARBINARY} primary_key_values:{lengths:2 lengths:1 lengths:4 values:"101name"} primary_key_values:{lengths:2 lengths:20 lengths:4 values:"1118446744073709551615name"}` + utils.MustMatchPB(t, want, statement) case querypb.StreamEvent_Statement_Error: - want := `sql:"query" ` - got := fmt.Sprintf("%v", statement) - if got != want { - t.Errorf("got %s, want %s", got, want) - } + want := `sql:"query"` + utils.MustMatchPB(t, want, statement) default: 
t.Errorf("unexpected: %#v", event) } } // then test the position - want := `timestamp:1 position:"MariaDB/0-41983-20" ` - got := fmt.Sprintf("%v", event.EventToken) - if got != want { - t.Errorf("got %s, want %s", got, want) - } + want := `timestamp:1 position:"MariaDB/0-41983-20"` + utils.MustMatchPB(t, want, event.EventToken) return nil }, } @@ -186,21 +178,15 @@ func TestDDLEvent(t *testing.T) { for _, statement := range event.Statements { switch statement.Category { case querypb.StreamEvent_Statement_DDL: - want := `category:DDL sql:"DDL" ` - got := fmt.Sprintf("%v", statement) - if got != want { - t.Errorf("got %s, want %s", got, want) - } + want := `category:DDL sql:"DDL"` + utils.MustMatchPB(t, want, statement) default: t.Errorf("unexpected: %#v", event) } } // then test the position - want := `timestamp:1 position:"MariaDB/0-41983-20" ` - got := fmt.Sprintf("%v", event.EventToken) - if got != want { - t.Errorf("got %s, want %s", got, want) - } + want := `timestamp:1 position:"MariaDB/0-41983-20"` + utils.MustMatchPB(t, want, event.EventToken) return nil }, } diff --git a/go/vt/binlog/grpcbinlogstreamer/streamer.go b/go/vt/binlog/grpcbinlogstreamer/streamer.go index 274846247a4..eaa5bdd162e 100644 --- a/go/vt/binlog/grpcbinlogstreamer/streamer.go +++ b/go/vt/binlog/grpcbinlogstreamer/streamer.go @@ -28,12 +28,13 @@ import ( // UpdateStream is the gRPC UpdateStream server type UpdateStream struct { + binlogservicepb.UnimplementedUpdateStreamServer updateStream binlog.UpdateStream } // New returns a new go rpc server implementation stub for UpdateStream func New(updateStream binlog.UpdateStream) *UpdateStream { - return &UpdateStream{updateStream} + return &UpdateStream{updateStream: updateStream} } // StreamKeyRange is part of the binlogservicepb.UpdateStreamServer interface diff --git a/go/vt/callerid/testsuite/testsuite.go b/go/vt/callerid/testsuite/testsuite.go index e2b559ad7ec..99f22883a99 100644 --- a/go/vt/callerid/testsuite/testsuite.go +++ 
b/go/vt/callerid/testsuite/testsuite.go @@ -21,7 +21,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/callerid" diff --git a/go/vt/dbconnpool/connection.go b/go/vt/dbconnpool/connection.go index d55acfda17d..bdf74b8a429 100644 --- a/go/vt/dbconnpool/connection.go +++ b/go/vt/dbconnpool/connection.go @@ -77,7 +77,7 @@ func (dbc *DBConnection) ExecuteStreamFetch(query string, callback func(*sqltype qr := alloc() byteCount := 0 for { - row, err := dbc.FetchNext() + row, err := dbc.FetchNext(nil) if err != nil { dbc.handleError(err) return err diff --git a/go/vt/discovery/fake_healthcheck.go b/go/vt/discovery/fake_healthcheck.go index 4407ae50ebc..2f17e752b42 100644 --- a/go/vt/discovery/fake_healthcheck.go +++ b/go/vt/discovery/fake_healthcheck.go @@ -21,7 +21,7 @@ import ( "sort" "sync" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/sync2" "vitess.io/vitess/go/vt/topo" diff --git a/go/vt/discovery/healthcheck.go b/go/vt/discovery/healthcheck.go index 83a4048fe87..9a34c45e620 100644 --- a/go/vt/discovery/healthcheck.go +++ b/go/vt/discovery/healthcheck.go @@ -406,7 +406,7 @@ func (hc *HealthCheckImpl) deleteTablet(tablet *topodata.Tablet) { } } -func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Target, trivialUpdate bool, isPrimaryUp bool) { +func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Target, trivialUpdate bool, up bool) { // hc.healthByAlias is authoritative, it should be updated hc.mu.Lock() defer hc.mu.Unlock() @@ -431,7 +431,7 @@ func (hc *HealthCheckImpl) updateHealth(th *TabletHealth, prevTarget *query.Targ isPrimary := th.Target.TabletType == topodata.TabletType_MASTER switch { - case isPrimary && isPrimaryUp: + case isPrimary && up: if len(hc.healthy[targetKey]) == 0 { hc.healthy[targetKey] = append(hc.healthy[targetKey], th) } else { @@ -449,7 +449,7 @@ func (hc *HealthCheckImpl) 
updateHealth(th *TabletHealth, prevTarget *query.Targ hc.healthy[targetKey][0] = th } } - case isPrimary && !isPrimaryUp: + case isPrimary && !up: if healthy, ok := hc.healthy[targetKey]; ok && len(healthy) > 0 { // isPrimary is true here therefore we should only have 1 tablet in healthy alias := tabletAliasString(topoproto.TabletAliasString(healthy[0].Tablet.Alias)) diff --git a/go/vt/discovery/healthcheck_test.go b/go/vt/discovery/healthcheck_test.go index 54d3c303a12..90a6205d4d9 100644 --- a/go/vt/discovery/healthcheck_test.go +++ b/go/vt/discovery/healthcheck_test.go @@ -18,6 +18,7 @@ package discovery import ( "bytes" + "context" "flag" "fmt" "html/template" @@ -38,8 +39,6 @@ import ( "vitess.io/vitess/go/vt/topo/memorytopo" "vitess.io/vitess/go/vt/topo/topoproto" - "context" - "vitess.io/vitess/go/vt/grpcclient" "vitess.io/vitess/go/vt/status" "vitess.io/vitess/go/vt/topo" @@ -57,7 +56,7 @@ var connMapMu sync.Mutex func init() { tabletconn.RegisterDialer("fake_gateway", tabletDialer) - //log error + // log error if err := flag.Set("tablet_protocol", "fake_gateway"); err != nil { log.Errorf("failed to set flag \"tablet_protocol\" to \"fake_gateway\":%v", err) } @@ -196,7 +195,7 @@ func TestHealthCheck(t *testing.T) { } input <- shr result = <-resultChan - //TODO: figure out how to compare objects that contain errors using utils.MustMatch + // TODO: figure out how to compare objects that contain errors using utils.MustMatch assert.True(t, want.DeepEqual(result), "Wrong TabletHealth data\n Expected: %v\n Actual: %v", want, result) testChecksum(t, 1027934207, hc.stateChecksum()) // unchanged @@ -257,7 +256,7 @@ func TestHealthCheckStreamError(t *testing.T) { LastError: fmt.Errorf("some stream error"), } result = <-resultChan - //TODO: figure out how to compare objects that contain errors using utils.MustMatch + // TODO: figure out how to compare objects that contain errors using utils.MustMatch assert.True(t, want.DeepEqual(result), "Wrong TabletHealth data\n 
Expected: %v\n Actual: %v", want, result) // tablet should be removed from healthy list a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_REPLICA}) @@ -317,7 +316,7 @@ func TestHealthCheckErrorOnPrimary(t *testing.T) { LastError: fmt.Errorf("some stream error"), } result = <-resultChan - //TODO: figure out how to compare objects that contain errors using utils.MustMatch + // TODO: figure out how to compare objects that contain errors using utils.MustMatch assert.True(t, want.DeepEqual(result), "Wrong TabletHealth data\n Expected: %v\n Actual: %v", want, result) // tablet should be removed from healthy list a := hc.GetHealthyTabletStats(&querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}) @@ -1158,7 +1157,7 @@ func TestTemplate(t *testing.T) { } func TestDebugURLFormatting(t *testing.T) { - //log error + // log error if err2 := flag.Set("tablet_url_template", "https://{{.GetHostNameLevel 0}}.bastion.{{.Tablet.Alias.Cell}}.corp"); err2 != nil { log.Errorf("flag.Set(\"tablet_url_template\", \"https://{{.GetHostNameLevel 0}}.bastion.{{.Tablet.Alias.Cell}}.corp\") failed : %v", err2) } @@ -1310,9 +1309,4 @@ func createTestTablet(uid uint32, cell, host string) *topodatapb.Tablet { return tablet } -var mustMatch = utils.MustMatchFn( - []interface{}{ // types with unexported fields - TabletHealth{}, - }, - []string{".Conn"}, // ignored fields -) +var mustMatch = utils.MustMatchFn(".Conn" /* ignored fields*/) diff --git a/go/vt/discovery/legacy_healthcheck.go b/go/vt/discovery/legacy_healthcheck.go index fa1f5baa3aa..ae3754942c8 100644 --- a/go/vt/discovery/legacy_healthcheck.go +++ b/go/vt/discovery/legacy_healthcheck.go @@ -50,7 +50,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/stats" diff --git a/go/vt/discovery/legacy_healthcheck_flaky_test.go 
b/go/vt/discovery/legacy_healthcheck_flaky_test.go index 40e8d7ad6bc..59c70ecd250 100644 --- a/go/vt/discovery/legacy_healthcheck_flaky_test.go +++ b/go/vt/discovery/legacy_healthcheck_flaky_test.go @@ -21,11 +21,12 @@ import ( "flag" "fmt" "html/template" - "reflect" "strings" "testing" "time" + "vitess.io/vitess/go/test/utils" + "context" querypb "vitess.io/vitess/go/vt/proto/query" @@ -64,9 +65,7 @@ func TestLegacyHealthCheck(t *testing.T) { Serving: false, } res := <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) testChecksum(t, 401258919, hc.stateChecksum()) // one tablet after receiving a StreamHealthResponse @@ -90,9 +89,7 @@ func TestLegacyHealthCheck(t *testing.T) { input <- shr t.Logf(`input <- {{Keyspace: "k", Shard: "s", TabletType: MASTER}, Serving: true, TabletExternallyReparentedTimestamp: 10, {SecondsBehindMaster: 1, CpuUsage: 0.2}}`) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) // Verify that the error count is initialized to 0 after the first tablet response. 
if err := checkErrorCounter("k", "s", topodatapb.TabletType_MASTER, 0); err != nil { @@ -114,9 +111,7 @@ func TestLegacyHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 10, }}, }} - if !reflect.DeepEqual(tcsl, tcslWant) { - t.Errorf("hc.CacheStatus() =\n%+v; want\n%+v", tcsl[0], tcslWant[0]) - } + utils.MustMatch(t, tcslWant, tcsl) testChecksum(t, 1562785705, hc.stateChecksum()) // TabletType changed, should get both old and new event @@ -140,9 +135,7 @@ func TestLegacyHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 10, } res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) want = &LegacyTabletStats{ Key: "a,vt:1", Tablet: tablet, @@ -153,9 +146,7 @@ func TestLegacyHealthCheck(t *testing.T) { TabletExternallyReparentedTimestamp: 0, } res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) if err := checkErrorCounter("k", "s", topodatapb.TabletType_REPLICA, 0); err != nil { t.Errorf("%v", err) @@ -181,9 +172,7 @@ func TestLegacyHealthCheck(t *testing.T) { input <- shr t.Logf(`input <- {{Keyspace: "k", Shard: "s", TabletType: REPLICA}, TabletExternallyReparentedTimestamp: 0, {SecondsBehindMaster: 1, CpuUsage: 0.3}}`) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) testChecksum(t, 1200695592, hc.stateChecksum()) // HealthError @@ -206,9 +195,7 @@ func TestLegacyHealthCheck(t *testing.T) { input <- shr t.Logf(`input <- {{Keyspace: "k", Shard: "s", TabletType: REPLICA}, Serving: true, TabletExternallyReparentedTimestamp: 0, {HealthError: "some error", SecondsBehindMaster: 1, CpuUsage: 0.3}}`) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) testChecksum(t, 1200695592, 
hc.stateChecksum()) // unchanged // remove tablet @@ -225,9 +212,7 @@ func TestLegacyHealthCheck(t *testing.T) { LastError: context.Canceled, } res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf("<-l.output:\n%+v; want\n%+v", res, want) - } + utils.MustMatch(t, want, res) testChecksum(t, 0, hc.stateChecksum()) // close healthcheck @@ -256,9 +241,7 @@ func TestLegacyHealthCheckStreamError(t *testing.T) { Serving: false, } res := <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ @@ -279,9 +262,7 @@ func TestLegacyHealthCheckStreamError(t *testing.T) { input <- shr t.Logf(`input <- {{Keyspace: "k", Shard: "s", TabletType: MASTER}, Serving: true, TabletExternallyReparentedTimestamp: 10, {SecondsBehindMaster: 1, CpuUsage: 0.2}}`) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) // Stream error fc.errCh <- fmt.Errorf("some stream error") @@ -296,9 +277,7 @@ func TestLegacyHealthCheckStreamError(t *testing.T) { LastError: fmt.Errorf("some stream error"), } res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf("<-l.output:\n%+v; want\n%+v", res, want) - } + utils.MustMatch(t, want, res) // close healthcheck hc.Close() @@ -328,9 +307,7 @@ func TestLegacyHealthCheckVerifiesTabletAlias(t *testing.T) { Serving: false, } res := <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) input <- &querypb.StreamHealthResponse{ Target: &querypb.Target{Keyspace: "k", Shard: "s", TabletType: topodatapb.TabletType_MASTER}, @@ -394,9 +371,7 @@ func TestLegacyHealthCheckCloseWaitsForGoRoutines(t *testing.T) { Serving: false, } res := <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, 
res, want) - } + utils.MustMatch(t, want, res) // Verify that the listener works in general. shr := &querypb.StreamHealthResponse{ @@ -417,9 +392,7 @@ func TestLegacyHealthCheckCloseWaitsForGoRoutines(t *testing.T) { input <- shr t.Logf(`input <- %v`, shr) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) // Change input to distinguish between stats sent before and after Close(). shr.TabletExternallyReparentedTimestamp = 11 @@ -489,9 +462,7 @@ func TestLegacyHealthCheckTimeout(t *testing.T) { Serving: false, } res := <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) // one tablet after receiving a StreamHealthResponse shr := &querypb.StreamHealthResponse{ @@ -512,9 +483,7 @@ func TestLegacyHealthCheckTimeout(t *testing.T) { input <- shr t.Logf(`input <- {{Keyspace: "k", Shard: "s", TabletType: MASTER}, Serving: true, TabletExternallyReparentedTimestamp: 10, {SecondsBehindMaster: 1, CpuUsage: 0.2}}`) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) if err := checkErrorCounter("k", "s", topodatapb.TabletType_MASTER, 0); err != nil { t.Errorf("%v", err) @@ -562,9 +531,7 @@ func TestLegacyHealthCheckTimeout(t *testing.T) { // wait for the exponential backoff to wear off and health monitoring to resume. 
time.Sleep(timeout) res = <-l.output - if !reflect.DeepEqual(res, want) { - t.Errorf(`<-l.output: %+v; want %+v`, res, want) - } + utils.MustMatch(t, want, res) // close healthcheck hc.Close() diff --git a/go/vt/discovery/legacy_topology_watcher_test.go b/go/vt/discovery/legacy_topology_watcher_test.go index 268bb8d57f4..b828289b1bd 100644 --- a/go/vt/discovery/legacy_topology_watcher_test.go +++ b/go/vt/discovery/legacy_topology_watcher_test.go @@ -23,7 +23,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -241,8 +241,8 @@ func checkLegacyWatcher(t *testing.T, cellTablets, refreshKnownTablets bool) { // tablet2 happens to land on the host:port that tablet 1 used to be on. // This can only be tested when we refresh known tablets. if refreshKnownTablets { - origTablet := *tablet - origTablet2 := *tablet2 + origTablet := proto.Clone(tablet).(*topodatapb.Tablet) + origTablet2 := proto.Clone(tablet2).(*topodatapb.Tablet) if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { t.Hostname = tablet.Hostname diff --git a/go/vt/discovery/replicationlag.go b/go/vt/discovery/replicationlag.go index 4840bd6205f..a78765e1d4c 100644 --- a/go/vt/discovery/replicationlag.go +++ b/go/vt/discovery/replicationlag.go @@ -31,6 +31,36 @@ var ( legacyReplicationLagAlgorithm = flag.Bool("legacy_replication_lag_algorithm", true, "use the legacy algorithm when selecting the vttablets for serving") ) +// GetLowReplicationLag getter for use by debugenv +func GetLowReplicationLag() time.Duration { + return *lowReplicationLag +} + +// SetLowReplicationLag setter for use by debugenv +func SetLowReplicationLag(lag time.Duration) { + lowReplicationLag = &lag +} + +// GetHighReplicationLagMinServing getter for use by debugenv +func GetHighReplicationLagMinServing() time.Duration { + return 
*highReplicationLagMinServing +} + +// SetHighReplicationLagMinServing setter for use by debugenv +func SetHighReplicationLagMinServing(lag time.Duration) { + highReplicationLagMinServing = &lag +} + +// GetMinNumTablets getter for use by debugenv +func GetMinNumTablets() int { + return *minNumTablets +} + +// SetMinNumTablets setter for use by debugenv +func SetMinNumTablets(numTablets int) { + minNumTablets = &numTablets +} + // IsReplicationLagHigh verifies that the given LegacytabletHealth refers to a tablet with high // replication lag, i.e. higher than the configured discovery_low_replication_lag flag. func IsReplicationLagHigh(tabletHealth *TabletHealth) bool { diff --git a/go/vt/discovery/tablet_health.go b/go/vt/discovery/tablet_health.go index 1b376b41761..1a28639348d 100644 --- a/go/vt/discovery/tablet_health.go +++ b/go/vt/discovery/tablet_health.go @@ -22,7 +22,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/queryservice" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/netutil" "vitess.io/vitess/go/vt/proto/query" diff --git a/go/vt/discovery/tablet_health_check.go b/go/vt/discovery/tablet_health_check.go index 664e1924870..fd865f9db53 100644 --- a/go/vt/discovery/tablet_health_check.go +++ b/go/vt/discovery/tablet_health_check.go @@ -34,7 +34,7 @@ import ( "vitess.io/vitess/go/vt/vttablet/queryservice" "vitess.io/vitess/go/vt/vttablet/tabletconn" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/proto/query" "vitess.io/vitess/go/vt/proto/topodata" @@ -77,7 +77,7 @@ type tabletHealthCheck struct { // String is defined because we want to print a []*tabletHealthCheck array nicely. 
func (thc *tabletHealthCheck) String() string { return fmt.Sprintf("tabletHealthCheck{Tablet: %v,Target: %v,Serving: %v, MasterTermStartTime: %v, Stats: %v, LastError: %v", - thc.Tablet, thc.Target, thc.Serving, thc.MasterTermStartTime, *thc.Stats, thc.LastError) + thc.Tablet, thc.Target, thc.Serving, thc.MasterTermStartTime, thc.Stats, thc.LastError) } // SimpleCopy returns a TabletHealth with all the necessary fields copied from tabletHealthCheck. @@ -200,7 +200,7 @@ func (thc *tabletHealthCheck) processResponse(hc *HealthCheckImpl, shr *query.St thc.setServingState(serving, reason) // notify downstream for master change - hc.updateHealth(thc.SimpleCopy(), prevTarget, trivialUpdate, true) + hc.updateHealth(thc.SimpleCopy(), prevTarget, trivialUpdate, thc.Serving) return nil } @@ -294,7 +294,7 @@ func (thc *tabletHealthCheck) checkConn(hc *HealthCheckImpl) { return } // trivialUpdate = false because this is an error - // isPrimaryUp = false because we did not get a healthy response + // up = false because we did not get a healthy response hc.updateHealth(thc.SimpleCopy(), thc.Target, false, false) } // If there was a timeout send an error. We do this after stream has returned. 
@@ -305,7 +305,7 @@ func (thc *tabletHealthCheck) checkConn(hc *HealthCheckImpl) { thc.setServingState(false, thc.LastError.Error()) hcErrorCounters.Add([]string{thc.Target.Keyspace, thc.Target.Shard, topoproto.TabletTypeLString(thc.Target.TabletType)}, 1) // trivialUpdate = false because this is an error - // isPrimaryUp = false because we did not get a healthy response within the timeout + // up = false because we did not get a healthy response within the timeout hc.updateHealth(thc.SimpleCopy(), thc.Target, false, false) } diff --git a/go/vt/discovery/tablet_picker.go b/go/vt/discovery/tablet_picker.go index 0c727162236..8b9c45aae80 100644 --- a/go/vt/discovery/tablet_picker.go +++ b/go/vt/discovery/tablet_picker.go @@ -23,6 +23,8 @@ import ( "sync" "time" + "vitess.io/vitess/go/stats" + "vitess.io/vitess/go/vt/topo/topoproto" vtrpcpb "vitess.io/vitess/go/vt/proto/vtrpc" @@ -41,6 +43,7 @@ import ( var ( tabletPickerRetryDelay = 30 * time.Second muTabletPickerRetryDelay sync.Mutex + globalTPStats *tabletPickerStats ) // GetTabletPickerRetryDelay synchronizes changes to tabletPickerRetryDelay. 
Used in tests only at the moment @@ -107,12 +110,12 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table return nil, vterrors.Errorf(vtrpcpb.Code_CANCELED, "context has expired") default: } - candidates := tp.getMatchingTablets(ctx) - + candidates := tp.GetMatchingTablets(ctx) if len(candidates) == 0 { // if no candidates were found, sleep and try again - log.Infof("No tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %d seconds", - tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, int(GetTabletPickerRetryDelay()/1e9)) + tp.incNoTabletFoundStat() + log.Infof("No tablet found for streaming, shard %s.%s, cells %v, tabletTypes %v, sleeping for %.3f seconds", + tp.keyspace, tp.shard, tp.cells, tp.tabletTypes, float64(GetTabletPickerRetryDelay().Milliseconds())/1000.0) timer := time.NewTimer(GetTabletPickerRetryDelay()) select { case <-ctx.Done(): @@ -133,6 +136,7 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table log.Warningf("unable to connect to tablet for alias %v", ti.Alias) candidates = append(candidates[:idx], candidates[idx+1:]...) 
if len(candidates) == 0 { + tp.incNoTabletFoundStat() break } continue @@ -145,9 +149,9 @@ func (tp *TabletPicker) PickForStreaming(ctx context.Context) (*topodatapb.Table } } -// getMatchingTablets returns a list of TabletInfo for tablets +// GetMatchingTablets returns a list of TabletInfo for tablets // that match the cells, keyspace, shard and tabletTypes for this TabletPicker -func (tp *TabletPicker) getMatchingTablets(ctx context.Context) []*topo.TabletInfo { +func (tp *TabletPicker) GetMatchingTablets(ctx context.Context) []*topo.TabletInfo { // Special handling for MASTER tablet type // Since there is only one master, we ignore cell and find the master aliases := make([]*topodatapb.TabletAlias, 0) @@ -226,4 +230,25 @@ func (tp *TabletPicker) getMatchingTablets(ctx context.Context) []*topo.TabletIn func init() { // TODO(sougou): consolidate this call to be once per process. rand.Seed(time.Now().UnixNano()) + globalTPStats = newTabletPickerStats() +} + +type tabletPickerStats struct { + mu sync.Mutex + noTabletFoundError *stats.CountersWithMultiLabels +} + +func newTabletPickerStats() *tabletPickerStats { + tpStats := &tabletPickerStats{} + tpStats.noTabletFoundError = stats.NewCountersWithMultiLabels("TabletPickerNoTabletFoundErrorCount", "", []string{"cells", "keyspace", "shard", "types"}) + return tpStats +} + +func (tp *TabletPicker) incNoTabletFoundStat() { + globalTPStats.mu.Lock() + defer globalTPStats.mu.Unlock() + cells := strings.Join(tp.cells, "_") + tabletTypes := strings.Join(topoproto.MakeStringTypeList(tp.tabletTypes), "_") + labels := []string{cells, tp.keyspace, tp.shard, tabletTypes} + globalTPStats.noTabletFoundError.Add(labels, 1) } diff --git a/go/vt/discovery/tablet_picker_test.go b/go/vt/discovery/tablet_picker_test.go index b9e55d92914..64e8cea0f44 100644 --- a/go/vt/discovery/tablet_picker_test.go +++ b/go/vt/discovery/tablet_picker_test.go @@ -24,9 +24,9 @@ import ( "context" - "github.com/golang/protobuf/proto" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -329,6 +329,7 @@ func TestPickError(t *testing.T) { defer cancel() _, err = tp.PickForStreaming(ctx) require.EqualError(t, err, "context has expired") + require.Greater(t, globalTPStats.noTabletFoundError.Counts()["cell.ks.0.replica"], int64(0)) } type pickerTestEnv struct { diff --git a/go/vt/discovery/tablets_cache_status.go b/go/vt/discovery/tablets_cache_status.go index 60e2c65fee9..ad6e44e78db 100644 --- a/go/vt/discovery/tablets_cache_status.go +++ b/go/vt/discovery/tablets_cache_status.go @@ -6,7 +6,7 @@ import ( "sort" "strings" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" diff --git a/go/vt/discovery/topology_watcher_test.go b/go/vt/discovery/topology_watcher_test.go index 36d4b999229..974c70ac837 100644 --- a/go/vt/discovery/topology_watcher_test.go +++ b/go/vt/discovery/topology_watcher_test.go @@ -23,7 +23,7 @@ import ( "context" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/logutil" topodatapb "vitess.io/vitess/go/vt/proto/topodata" @@ -194,8 +194,8 @@ func checkWatcher(t *testing.T, refreshKnownTablets bool) { // tablet2 happens to land on the host:port that tablet 1 used to be on. // This can only be tested when we refresh known tablets. 
if refreshKnownTablets { - origTablet := *tablet - origTablet2 := *tablet2 + origTablet := proto.Clone(tablet).(*topodatapb.Tablet) + origTablet2 := proto.Clone(tablet2).(*topodatapb.Tablet) if _, err := ts.UpdateTabletFields(context.Background(), tablet2.Alias, func(t *topodatapb.Tablet) error { t.Hostname = tablet.Hostname diff --git a/go/vt/dtids/dtids_test.go b/go/vt/dtids/dtids_test.go index acb00bdfe84..63449a78949 100644 --- a/go/vt/dtids/dtids_test.go +++ b/go/vt/dtids/dtids_test.go @@ -19,8 +19,8 @@ package dtids import ( "testing" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" diff --git a/go/vt/key/key.go b/go/vt/key/key.go index 9cedad6f409..523d154042b 100644 --- a/go/vt/key/key.go +++ b/go/vt/key/key.go @@ -20,11 +20,14 @@ import ( "bytes" "encoding/binary" "encoding/hex" + "errors" "fmt" "math" "regexp" "strings" + "google.golang.org/protobuf/proto" + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -191,8 +194,30 @@ func KeyRangeEqual(left, right *topodatapb.KeyRange) bool { if right == nil { return len(left.Start) == 0 && len(left.End) == 0 } - return bytes.Equal(left.Start, right.Start) && - bytes.Equal(left.End, right.End) + return bytes.Equal(addPadding(left.Start), addPadding(right.Start)) && + bytes.Equal(addPadding(left.End), addPadding(right.End)) +} + +// addPadding adds padding to make sure keyrange represents an 8 byte integer. +// From Vitess docs: +// A hash vindex produces an 8-byte number. +// This means that all numbers less than 0x8000000000000000 will fall in shard -80. +// Any number with the highest bit set will be >= 0x8000000000000000, and will therefore +// belong to shard 80-. 
+// This means that from a keyrange perspective -80 == 00-80 == 0000-8000 == 000000-800000 +// If we don't add this padding, we could run into issues when transitioning from keyranges +// that use 2 bytes to 4 bytes. +func addPadding(kr []byte) []byte { + paddedKr := make([]byte, 8) + + for i := 0; i < len(kr); i++ { + paddedKr = append(paddedKr, kr[i]) + } + + for i := len(kr); i < 8; i++ { + paddedKr = append(paddedKr, 0) + } + return paddedKr } // KeyRangeStartSmaller returns true if right's keyrange start is _after_ left's start @@ -214,7 +239,7 @@ func KeyRangeStartEqual(left, right *topodatapb.KeyRange) bool { if right == nil { return len(left.Start) == 0 } - return bytes.Equal(left.Start, right.Start) + return bytes.Equal(addPadding(left.Start), addPadding(right.Start)) } // KeyRangeEndEqual returns true if both key ranges have the same end @@ -225,7 +250,7 @@ func KeyRangeEndEqual(left, right *topodatapb.KeyRange) bool { if right == nil { return len(left.End) == 0 } - return bytes.Equal(left.End, right.End) + return bytes.Equal(addPadding(left.End), addPadding(right.End)) } // For more info on the following functions, see: @@ -257,7 +282,7 @@ func KeyRangesOverlap(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, } // compute max(c,a) and min(b,d) // start with (a,b) - result := *first + result := proto.Clone(first).(*topodatapb.KeyRange) // if c > a, then use c if bytes.Compare(second.Start, first.Start) > 0 { result.Start = second.Start @@ -269,7 +294,7 @@ func KeyRangesOverlap(first, second *topodatapb.KeyRange) (*topodatapb.KeyRange, if len(first.End) == 0 || (len(second.End) != 0 && bytes.Compare(second.End, first.End) < 0) { result.End = second.End } - return &result, nil + return result, nil } // KeyRangeIncludes returns true if the first provided KeyRange, big, @@ -346,3 +371,74 @@ var krRegexp = regexp.MustCompile(`^[0-9a-fA-F]*-[0-9a-fA-F]*$`) func IsKeyRange(kr string) bool { return krRegexp.MatchString(kr) } + +// GenerateShardRanges 
returns shard ranges assuming a keyspace with N shards. +func GenerateShardRanges(shards int) ([]string, error) { + var format string + var maxShards int + + switch { + case shards <= 0: + return nil, errors.New("shards must be greater than zero") + case shards <= 256: + format = "%02x" + maxShards = 256 + case shards <= 65536: + format = "%04x" + maxShards = 65536 + default: + return nil, errors.New("this function does not support more than 65536 shards in a single keyspace") + } + + rangeFormatter := func(start, end int) string { + var ( + startKid string + endKid string + ) + + if start != 0 { + startKid = fmt.Sprintf(format, start) + } + + if end != maxShards { + endKid = fmt.Sprintf(format, end) + } + + return fmt.Sprintf("%s-%s", startKid, endKid) + } + + start := 0 + end := 0 + + // If shards does not divide evenly into maxShards, then there is some lossiness, + // where each shard is smaller than it should technically be (if, for example, size == 25.6). + // If we choose to keep everything in ints, then we have two choices: + // - Have every shard in #numshards be a uniform size, tack on an additional shard + // at the end of the range to account for the loss. This is bad because if you ask for + // 7 shards, you'll actually get 7 uniform shards with 1 small shard, for 8 total shards. + // It's also bad because one shard will have much different data distribution than the rest. + // - Expand the final shard to include whatever is left in the keyrange. This will give the + // correct number of shards, which is good, but depending on how lossy each individual shard is, + // you could end with that final shard being significantly larger than the rest of the shards, + // so this doesn't solve the data distribution problem. + // + // By tracking the "real" end (both in the real number sense, and in the truthfulness of the value sense), + // we can re-truncate the integer end on each iteration, which spreads the lossiness more + // evenly across the shards. 
+ // + // This implementation has no impact on shard numbers that are powers of 2, even at large numbers, + // which you can see in the tests. + size := float64(maxShards) / float64(shards) + realEnd := float64(0) + shardRanges := make([]string, 0, shards) + + for i := 1; i <= shards; i++ { + realEnd = float64(i) * size + + end = int(realEnd) + shardRanges = append(shardRanges, rangeFormatter(start, end)) + start = end + } + + return shardRanges, nil +} diff --git a/go/vt/key/key_test.go b/go/vt/key/key_test.go index 8643d0bcc73..d8d98e92f2b 100644 --- a/go/vt/key/key_test.go +++ b/go/vt/key/key_test.go @@ -21,8 +21,9 @@ import ( "strings" "testing" - "github.com/golang/protobuf/proto" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/proto" topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) @@ -240,6 +241,167 @@ func TestKeyRangeAdd(t *testing.T) { } } +func TestKeyRangeEndEqual(t *testing.T) { + testcases := []struct { + first string + second string + out bool + }{{ + first: "", + second: "", + out: true, + }, { + first: "", + second: "-80", + out: false, + }, { + first: "40-", + second: "10-", + out: true, + }, { + first: "-8000", + second: "-80", + out: true, + }, { + first: "-8000", + second: "-8000000000000000", + out: true, + }, { + first: "-80", + second: "-8000", + out: true, + }} + stringToKeyRange := func(spec string) *topodatapb.KeyRange { + if spec == "" { + return nil + } + parts := strings.Split(spec, "-") + if len(parts) != 2 { + panic("invalid spec") + } + kr, err := ParseKeyRangeParts(parts[0], parts[1]) + if err != nil { + panic(err) + } + return kr + } + + for _, tcase := range testcases { + first := stringToKeyRange(tcase.first) + second := stringToKeyRange(tcase.second) + out := KeyRangeEndEqual(first, second) + if out != tcase.out { + t.Fatalf("KeyRangeEndEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) + } + } +} + +func TestKeyRangeStartEqual(t 
*testing.T) { + testcases := []struct { + first string + second string + out bool + }{{ + first: "", + second: "", + out: true, + }, { + first: "", + second: "-80", + out: true, + }, { + first: "40-", + second: "20-", + out: false, + }, { + first: "-8000", + second: "-80", + out: true, + }, { + first: "-8000", + second: "-8000000000000000", + out: true, + }, { + first: "-80", + second: "-8000", + out: true, + }} + stringToKeyRange := func(spec string) *topodatapb.KeyRange { + if spec == "" { + return nil + } + parts := strings.Split(spec, "-") + if len(parts) != 2 { + panic("invalid spec") + } + kr, err := ParseKeyRangeParts(parts[0], parts[1]) + if err != nil { + panic(err) + } + return kr + } + + for _, tcase := range testcases { + first := stringToKeyRange(tcase.first) + second := stringToKeyRange(tcase.second) + out := KeyRangeStartEqual(first, second) + if out != tcase.out { + t.Fatalf("KeyRangeStartEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) + } + } +} + +func TestKeyRangeEqual(t *testing.T) { + testcases := []struct { + first string + second string + out bool + }{{ + first: "", + second: "", + out: true, + }, { + first: "", + second: "-80", + out: false, + }, { + first: "-8000", + second: "-80", + out: true, + }, { + first: "-8000", + second: "-8000000000000000", + out: true, + }, { + first: "-80", + second: "-8000", + out: true, + }} + stringToKeyRange := func(spec string) *topodatapb.KeyRange { + if spec == "" { + return nil + } + parts := strings.Split(spec, "-") + if len(parts) != 2 { + panic("invalid spec") + } + kr, err := ParseKeyRangeParts(parts[0], parts[1]) + if err != nil { + panic(err) + } + return kr + } + + for _, tcase := range testcases { + first := stringToKeyRange(tcase.first) + second := stringToKeyRange(tcase.second) + out := KeyRangeEqual(first, second) + if out != tcase.out { + t.Fatalf("KeyRangeEqual(%q, %q) expected %t, got %t", tcase.first, tcase.second, tcase.out, out) + } + } +} + func 
TestEvenShardsKeyRange_Error(t *testing.T) { testCases := []struct { i, n int @@ -579,3 +741,75 @@ func TestIsKeyRange(t *testing.T) { assert.Equal(t, IsKeyRange(tcase.in), tcase.out, tcase.in) } } + +func TestGenerateShardRanges(t *testing.T) { + type args struct { + shards int + } + + tests := []struct { + name string + args args + want []string + wantErr bool + }{ + { + "errors for shards less than 0", + args{0}, + nil, + true, + }, + { + "errors for shards more than 65536", + args{65537}, + nil, + true, + }, + { + "works for a single shard", + args{1}, + []string{"-"}, + false, + }, + { + "works for more than one shard", + args{2}, + []string{"-80", "80-"}, + false, + }, + { + "works for an odd number of shards", + args{7}, + []string{"-24", "24-49", "49-6d", "6d-92", "92-b6", "b6-db", "db-"}, + false, + }, + { + "works for large number of shards", + args{256}, + []string{"-01", "01-02", "02-03", "03-04", "04-05", "05-06", "06-07", "07-08", "08-09", "09-0a", "0a-0b", "0b-0c", "0c-0d", "0d-0e", "0e-0f", "0f-10", "10-11", "11-12", "12-13", "13-14", "14-15", "15-16", "16-17", "17-18", "18-19", "19-1a", "1a-1b", "1b-1c", "1c-1d", "1d-1e", "1e-1f", "1f-20", "20-21", "21-22", "22-23", "23-24", "24-25", "25-26", "26-27", "27-28", "28-29", "29-2a", "2a-2b", "2b-2c", "2c-2d", "2d-2e", "2e-2f", "2f-30", "30-31", "31-32", "32-33", "33-34", "34-35", "35-36", "36-37", "37-38", "38-39", "39-3a", "3a-3b", "3b-3c", "3c-3d", "3d-3e", "3e-3f", "3f-40", "40-41", "41-42", "42-43", "43-44", "44-45", "45-46", "46-47", "47-48", "48-49", "49-4a", "4a-4b", "4b-4c", "4c-4d", "4d-4e", "4e-4f", "4f-50", "50-51", "51-52", "52-53", "53-54", "54-55", "55-56", "56-57", "57-58", "58-59", "59-5a", "5a-5b", "5b-5c", "5c-5d", "5d-5e", "5e-5f", "5f-60", "60-61", "61-62", "62-63", "63-64", "64-65", "65-66", "66-67", "67-68", "68-69", "69-6a", "6a-6b", "6b-6c", "6c-6d", "6d-6e", "6e-6f", "6f-70", "70-71", "71-72", "72-73", "73-74", "74-75", "75-76", "76-77", "77-78", "78-79", "79-7a", "7a-7b", 
"7b-7c", "7c-7d", "7d-7e", "7e-7f", "7f-80", "80-81", "81-82", "82-83", "83-84", "84-85", "85-86", "86-87", "87-88", "88-89", "89-8a", "8a-8b", "8b-8c", "8c-8d", "8d-8e", "8e-8f", "8f-90", "90-91", "91-92", "92-93", "93-94", "94-95", "95-96", "96-97", "97-98", "98-99", "99-9a", "9a-9b", "9b-9c", "9c-9d", "9d-9e", "9e-9f", "9f-a0", "a0-a1", "a1-a2", "a2-a3", "a3-a4", "a4-a5", "a5-a6", "a6-a7", "a7-a8", "a8-a9", "a9-aa", "aa-ab", "ab-ac", "ac-ad", "ad-ae", "ae-af", "af-b0", "b0-b1", "b1-b2", "b2-b3", "b3-b4", "b4-b5", "b5-b6", "b6-b7", "b7-b8", "b8-b9", "b9-ba", "ba-bb", "bb-bc", "bc-bd", "bd-be", "be-bf", "bf-c0", "c0-c1", "c1-c2", "c2-c3", "c3-c4", "c4-c5", "c5-c6", "c6-c7", "c7-c8", "c8-c9", "c9-ca", "ca-cb", "cb-cc", "cc-cd", "cd-ce", "ce-cf", "cf-d0", "d0-d1", "d1-d2", "d2-d3", "d3-d4", "d4-d5", "d5-d6", "d6-d7", "d7-d8", "d8-d9", "d9-da", "da-db", "db-dc", "dc-dd", "dd-de", "de-df", "df-e0", "e0-e1", "e1-e2", "e2-e3", "e3-e4", "e4-e5", "e5-e6", "e6-e7", "e7-e8", "e8-e9", "e9-ea", "ea-eb", "eb-ec", "ec-ed", "ed-ee", "ee-ef", "ef-f0", "f0-f1", "f1-f2", "f2-f3", "f3-f4", "f4-f5", "f5-f6", "f6-f7", "f7-f8", "f8-f9", "f9-fa", "fa-fb", "fb-fc", "fc-fd", "fd-fe", "fe-ff", "ff-"}, + false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := GenerateShardRanges(tt.args.shards) + if tt.wantErr { + assert.Error(t, err) + return + } + + require.NoError(t, err) + assert.Equal(t, got, tt.want) + }) + } +} + +func TestShardCalculatorForShardsGreaterThan512(t *testing.T) { + got, err := GenerateShardRanges(512) + assert.NoError(t, err) + + want := "ff80-" + + assert.Equal(t, want, got[511], "Invalid mapping for a 512-shard keyspace. 
Expected %v, got %v", want, got[511]) +} diff --git a/go/vt/logutil/proto3_test.go b/go/vt/logutil/proto3_test.go index f9a02258759..58a78dea2ef 100644 --- a/go/vt/logutil/proto3_test.go +++ b/go/vt/logutil/proto3_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/proto" "vitess.io/vitess/go/vt/proto/vttime" ) diff --git a/go/vt/mysqlctl/backup.go b/go/vt/mysqlctl/backup.go index e8df43cf8af..80861f7b3d4 100644 --- a/go/vt/mysqlctl/backup.go +++ b/go/vt/mysqlctl/backup.go @@ -17,6 +17,7 @@ limitations under the License. package mysqlctl import ( + "context" "errors" "flag" "fmt" @@ -25,14 +26,15 @@ import ( "strings" "time" - "context" - "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/stats" "vitess.io/vitess/go/vt/log" "vitess.io/vitess/go/vt/mysqlctl/backupstorage" "vitess.io/vitess/go/vt/proto/vtrpc" + "vitess.io/vitess/go/vt/topo/topoproto" "vitess.io/vitess/go/vt/vterrors" + + topodatapb "vitess.io/vitess/go/vt/proto/topodata" ) // This file handles the backup and restore related code @@ -138,6 +140,39 @@ func Backup(ctx context.Context, params BackupParams) error { return finishErr } +// ParseBackupName parses the backup name for a given dir/name, according to +// the format generated by mysqlctl.Backup. An error is returned only if the +// backup name does not have the expected number of parts; errors parsing the +// timestamp and tablet alias are logged, and a nil value is returned for those +// fields in case of error. +func ParseBackupName(dir string, name string) (backupTime *time.Time, alias *topodatapb.TabletAlias, err error) { + parts := strings.Split(name, ".") + if len(parts) != 3 { + return nil, nil, vterrors.Errorf(vtrpc.Code_INVALID_ARGUMENT, "cannot backup name %s, expected .