Binlog: Improve ZstdInMemoryDecompressorMaxSize management #26260

name: Backups - Manual - Next Release - Upgrade Downgrade Testing
on:
push:
pull_request:
concurrency:
group: format('{0}-{1}', ${{ github.ref }}, 'Upgrade Downgrade Testing - Backups - Manual - Next Release')
cancel-in-progress: true
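# Note: only the ${{ }} expression in the group above is evaluated by GitHub Actions; the
# surrounding format(...) stays a literal string with the ref substituted into it, which is
# still unique per ref and therefore enough for cancel-in-progress to cancel superseded runs.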
permissions: read-all
jobs:
# This job usually executes in about 20 minutes
upgrade_downgrade_test_manual:
timeout-minutes: 40
name: Run Upgrade Downgrade Test - Backups - Manual - Next Release
runs-on: gh-hosted-runners-16cores-1
steps:
- name: Skip CI
run: |
if [[ "${{contains( github.event.pull_request.labels.*.name, 'Skip CI')}}" == "true" ]]; then
echo "skipping CI due to the 'Skip CI' label"
exit 1
fi
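# Exiting non-zero here fails the job immediately, which is how the 'Skip CI' label short-circuits the run.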
# Checkout to this build's commit
- name: Checkout to commit's code
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
fetch-depth: 0
- name: Set output with latest release branch
id: output-next-release-ref
run: |
next_release_ref=$(./tools/get_next_release.sh ${{github.base_ref}} ${{github.ref}})
echo $next_release_ref
echo "next_release_ref=${next_release_ref}" >> $GITHUB_OUTPUT
- name: Check if workflow needs to be skipped
id: skip-workflow
run: |
skip='false'
if [[ "${{github.event.pull_request}}" == "" ]] && [[ "${{github.ref}}" != "refs/heads/main" ]] && [[ ! "${{github.ref}}" =~ ^refs/heads/release-[0-9]+\.[0-9]$ ]] && [[ ! "${{github.ref}}" =~ "refs/tags/.*" ]]; then
skip='true'
fi
if [[ "${{steps.output-next-release-ref.outputs.next_release_ref}}" == "" ]]; then
skip='true'
fi
echo Skip ${skip}
echo "skip-workflow=${skip}" >> $GITHUB_OUTPUT
- name: Check for changes in relevant files
if: steps.skip-workflow.outputs.skip-workflow == 'false'
uses: dorny/paths-filter@ebc4d7e9ebcb0b1eb21480bb8f43113e996ac77a # v3.0.1
id: changes
with:
token: ''
filters: |
end_to_end:
- 'go/**'
- 'go/**/*.go'
- 'test.go'
- 'Makefile'
- 'build.env'
- 'go.sum'
- 'go.mod'
- 'proto/*.proto'
- 'tools/**'
- 'config/**'
- 'bootstrap.sh'
- '.github/workflows/upgrade_downgrade_test_backups_manual_next_release.yml'
- 'examples/**'
- name: Set up Go
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-go@0a12ed9d6a96ab950c8f026ed9f722fe0da7ef32 # v5.0.2
with:
go-version-file: go.mod
- name: Set up python
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1
- name: Tune the OS
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
sudo sysctl -w net.ipv4.ip_local_port_range="22768 65535"
- name: Get base dependencies
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
sudo DEBIAN_FRONTEND="noninteractive" apt-get update
# Uninstall any previously installed MySQL first
sudo systemctl stop apparmor
sudo DEBIAN_FRONTEND="noninteractive" apt-get remove -y --purge mysql-server mysql-client mysql-common
sudo apt-get -y autoremove
sudo apt-get -y autoclean
sudo deluser mysql
sudo rm -rf /var/lib/mysql
sudo rm -rf /etc/mysql
# Install MySQL 8.0
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A8D3785C
wget -c https://dev.mysql.com/get/mysql-apt-config_0.8.32-1_all.deb
echo mysql-apt-config mysql-apt-config/select-server select mysql-8.0 | sudo debconf-set-selections
sudo DEBIAN_FRONTEND="noninteractive" dpkg -i mysql-apt-config*
sudo apt-get update
sudo DEBIAN_FRONTEND="noninteractive" apt-get install -y mysql-server mysql-client
# Install everything else we need, and configure
sudo apt-get install -y make unzip g++ etcd curl git wget eatmydata grep
sudo service mysql stop
sudo service etcd stop
sudo bash -c "echo '/usr/sbin/mysqld { }' > /etc/apparmor.d/usr.sbin.mysqld" # https://bugs.launchpad.net/ubuntu/+source/mariadb-10.1/+bug/1806263
sudo ln -s /etc/apparmor.d/usr.sbin.mysqld /etc/apparmor.d/disable/
sudo apparmor_parser -R /etc/apparmor.d/usr.sbin.mysqld || echo "could not remove mysqld profile"
# install JUnit report formatter
go install github.com/vitessio/go-junit-report@HEAD
wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
sudo apt-get install -y gnupg2
sudo dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
sudo percona-release enable-only tools
sudo apt-get update
sudo apt-get install -y percona-xtrabackup-80
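# The step above installs MySQL 8.0 from the official apt repository and xtrabackup 8.0 from
# Percona; xtrabackup is presumably the backup engine exercised by the backup scripts below.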
# Checkout to the next release of Vitess
- name: Checkout to the other version's code (${{ steps.output-next-release-ref.outputs.next_release_ref }})
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
with:
ref: ${{ steps.output-next-release-ref.outputs.next_release_ref }}
- name: Get dependencies for the next release
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- name: Building next release's binaries
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 5
run: |
source build.env
NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-other/
cp -R bin /tmp/vitess-build-other/
rm -Rf bin/*
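# bin/ is emptied so that the build of this commit below cannot accidentally reuse binaries
# from the next-release build that was just copied to /tmp/vitess-build-other.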
# Checkout to this build's commit
- name: Checkout to commit's code
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- name: Get dependencies for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
go mod download
- name: Run make minimaltools
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
make minimaltools
- name: Building the binaries for this commit
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 5
run: |
source build.env
NOVTADMINBUILD=1 make build
mkdir -p /tmp/vitess-build-current/
cp -R bin /tmp/vitess-build-current/
# We create a sharded Vitess cluster following the local example.
# We also insert a few rows in our three tables.
- name: Create the example Vitess cluster with all components using version N
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env ; cd examples/backups
./start_cluster.sh
# Taking a backup
- name: Take a backup of all the shards
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 5
run: |
source build.env ; cd examples/backups
./take_backups.sh
# We insert more data in every table after the backup.
# When we later restore the backup taken in the previous step, we do not want to see the rows we are about to insert now.
# The initial number of rows for each table is:
# - customer: 5
# - product: 2
# - corder: 5
# We shall see the same number of rows after restoring the backup.
- name: Insert more data after the backup
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env ; cd examples ; source ./common/env.sh
echo "insert into customer(email) values('[email protected]');" | mysql
echo "insert into product(sku, description, price) values('SKU-1009', 'description', 89);" | mysql
echo "insert into corder(customer_id, sku, price) values(1, 'SKU-1009', 100);" | mysql
# Stop all the tablets and remove their data
- name: Stop tablets
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env ; cd examples/backups
./stop_tablets.sh
# We upgrade: we use the version N+1 of vttablet
- name: Upgrade - Swap binaries, use VTTablet N+1
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env
rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
cp /tmp/vitess-build-other/bin/vttablet $PWD/bin/vttablet
cp /tmp/vitess-build-other/bin/mysqlctl $PWD/bin/mysqlctl
cp /tmp/vitess-build-other/bin/mysqlctld $PWD/bin/mysqlctld
vttablet --version
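# Only the tablet-side binaries (vttablet, mysqlctl, mysqlctld) are swapped to N+1; the rest
# of the cluster keeps running the binaries built from this commit.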
# Starting the tablets again; they will automatically restore the last backup.
- name: Start new tablets and restore
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env ; cd examples/backups
./restart_tablets.sh
# give the tablets enough time to restore the backup
sleep 60
# Count the number of rows in each table to make sure the restoration is successful.
- name: Assert the number of rows in every table
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env ; cd examples ; source ./common/env.sh
echo "select count(sku) from product;" | mysql 2>&1| grep 2
echo "select count(email) from customer;" | mysql 2>&1| grep 5
echo "select count(sku) from corder;" | mysql 2>&1| grep 5
# We insert one more row in every table.
- name: Insert more rows in the tables
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env ; cd examples ; source ./common/env.sh
echo "insert into customer(email) values('[email protected]');" | mysql
echo "insert into product(sku, description, price) values('SKU-1011', 'description', 111);" | mysql
echo "insert into corder(customer_id, sku, price) values(1, 'SKU-1011', 111);" | mysql
# Taking a second backup of the cluster.
- name: Take a second backup of all the shards
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env ; cd examples/backups
./take_backups.sh
# We downgrade: we swap binaries and use the version N of the tablet.
- name: Downgrade - Swap binaries, use VTTablet N
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env
rm -f $PWD/bin/vttablet $PWD/bin/mysqlctl $PWD/bin/mysqlctld
cp /tmp/vitess-build-current/bin/vttablet $PWD/bin/vttablet
cp /tmp/vitess-build-current/bin/mysqlctl $PWD/bin/mysqlctl
cp /tmp/vitess-build-current/bin/mysqlctld $PWD/bin/mysqlctld
vttablet --version
# Starting the tablets again and restoring the second backup.
- name: Start new tablets and restore
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
timeout-minutes: 10
run: |
source build.env ; cd examples/backups
./upgrade_cluster.sh
# We count the number of rows in every table to check that the restore step was successful.
- name: Assert the number of rows in every table
if: steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env ; cd examples ; source ./common/env.sh
echo "select count(sku) from product;" | mysql 2>&1| grep 3
echo "select count(email) from customer;" | mysql 2>&1| grep 6
echo "select count(sku) from corder;" | mysql 2>&1| grep 6
- name: Stop the Vitess cluster
if: always() && steps.skip-workflow.outputs.skip-workflow == 'false' && steps.changes.outputs.end_to_end == 'true'
run: |
source build.env ; cd examples/local
./401_teardown.sh || true