From a4f7e0fe4bb25c1448f19eb9073a38d1d7e38aa8 Mon Sep 17 00:00:00 2001 From: Aolin Date: Thu, 21 Sep 2023 22:28:57 +0800 Subject: [PATCH] feat: support synchronizing docs from a PR (#17) --- sync_pr.sh | 162 ++ .../markdown-pages/en/tidbcloud/master/TOC.md | 420 ++++ .../master/tidb-cloud/api-overview.md | 36 + .../en/tidb/release-6.5/TOC-tidb-cloud.md | 541 ++++++ .../markdown-pages/en/tidb/release-6.5/TOC.md | 1123 +++++++++++ .../develop/dev-guide-choose-driver-or-orm.md | 300 +++ .../develop/dev-guide-insert-data.md | 304 +++ .../release-6.5/develop/dev-guide-overview.md | 85 + .../develop/dev-guide-playground-gitpod.md | 169 ++ .../develop/dev-guide-prepared-statement.md | 226 +++ ...ev-guide-sample-application-golang-gorm.md | 326 ++++ ...de-sample-application-golang-sql-driver.md | 557 ++++++ ...guide-sample-application-java-hibernate.md | 426 ++++ .../dev-guide-sample-application-java-jdbc.md | 591 ++++++ ...v-guide-sample-application-java-mybatis.md | 741 +++++++ ...ide-sample-application-java-spring-boot.md | 1024 ++++++++++ ...mple-application-python-mysql-connector.md | 291 +++ ...e-sample-application-python-mysqlclient.md | 292 +++ ...-guide-sample-application-python-peewee.md | 255 +++ ...guide-sample-application-python-pymysql.md | 287 +++ ...de-sample-application-python-sqlalchemy.md | 249 +++ .../develop/dev-guide-third-party-support.md | 318 +++ ...idb-cloud-guide-sample-application-java.md | 1730 +++++++++++++++++ .../data/markdown-pages/zh/tidb/master/TOC.md | 1188 +++++++++++ .../develop/dev-guide-choose-driver-or-orm.md | 303 +++ .../master/develop/dev-guide-insert-data.md | 293 +++ .../tidb/master/develop/dev-guide-overview.md | 49 + .../develop/dev-guide-playground-gitpod.md | 169 ++ .../develop/dev-guide-prepared-statement.md | 233 +++ ...ev-guide-sample-application-golang-gorm.md | 291 +++ ...de-sample-application-golang-sql-driver.md | 537 +++++ ...guide-sample-application-java-hibernate.md | 419 ++++ 
.../dev-guide-sample-application-java-jdbc.md | 576 ++++++ ...v-guide-sample-application-java-mybatis.md | 738 +++++++ ...ide-sample-application-java-spring-boot.md | 1019 ++++++++++ ...-guide-sample-application-python-django.md | 783 ++++++++ ...mple-application-python-mysql-connector.md | 283 +++ ...e-sample-application-python-mysqlclient.md | 282 +++ ...-guide-sample-application-python-peewee.md | 245 +++ ...guide-sample-application-python-pymysql.md | 277 +++ ...de-sample-application-python-sqlalchemy.md | 238 +++ .../en/tidb-in-kubernetes/master/TOC.md | 214 ++ .../grant-permissions-to-remote-storage.md | 207 ++ .../master/releases/release-1.5.0.md | 41 + .../master/tidb-operator-overview.md | 69 + .../master/whats-new-in-v1.5.md | 31 + .../zh/tidb-in-kubernetes/master/TOC.md | 214 ++ .../grant-permissions-to-remote-storage.md | 205 ++ .../master/releases/release-1.5.0.md | 41 + .../master/tidb-operator-overview.md | 71 + .../master/whats-new-in-v1.5.md | 31 + test_config.toml | 29 + 52 files changed, 19529 insertions(+) create mode 100755 sync_pr.sh create mode 100644 test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/TOC.md create mode 100644 test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC-tidb-cloud.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-choose-driver-or-orm.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-insert-data.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-overview.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-playground-gitpod.md create mode 100644 
test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-prepared-statement.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-gorm.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-sql-driver.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-hibernate.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-jdbc.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-mybatis.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-spring-boot.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysql-connector.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysqlclient.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-peewee.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-pymysql.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-sqlalchemy.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-third-party-support.md create mode 100644 test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/tidb-cloud/tidb-cloud-guide-sample-application-java.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/TOC.md create mode 100644 
test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-hibernate.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-mybatis.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysql-connector.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md create mode 100644 test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md create mode 100644 
test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md create mode 100644 test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md create mode 100644 test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md create mode 100644 test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md create mode 100644 test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md create mode 100644 test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md create mode 100644 test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md create mode 100644 test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md create mode 100644 test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md create mode 100644 test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md create mode 100644 test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md diff --git a/sync_pr.sh b/sync_pr.sh new file mode 100755 index 00000000..482974ab --- /dev/null +++ b/sync_pr.sh @@ -0,0 +1,162 @@ +#!/bin/bash + +# Synchronize the content of a PR to the markdown-pages folder to deploy a preview website. + +# Usage: ./sync_pr.sh [BRANCH_NAME] + +# BRANCH_NAME is optional and defaults to the current branch name. +# The branch name should follow the pattern r"preview(-cloud|-operator)?/pingcap/docs(-cn|-tidb-operator)?/[0-9]+". 
+# Examples: +# preview/pingcap/docs/1234: sync pingcap/docs/pull/1234 to markdown-pages/en/tidb/{PR_BASE_BRANCH} +# preview/pingcap/docs-cn/1234: sync pingcap/docs-cn/pull/1234 to markdown-pages/zh/tidb/{PR_BASE_BRANCH} +# preview-cloud/pingcap/docs/1234: sync pingcap/docs/pull/1234 to markdown-pages/en/tidbcloud/{PR_BASE_BRANCH} +# preview-operator/pingcap/docs-tidb-operator/1234: sync pingcap/docs-tidb-operator/pull/1234 to markdown-pages/en/tidb-in-kubernetes/{PR_BASE_BRANCH} and markdown-pages/zh/tidb-in-kubernetes/{PR_BASE_BRANCH} + +# Prerequisites: +# 1. Install jq +# 2. Set the GITHUB_TOKEN environment variable + +set -e + +test -n "$TEST" && set -x + +check_prerequisites() { + # Verify if jq is installed and GITHUB_TOKEN is set. + which jq &>/dev/null || (echo "Error: jq is required but not installed. You can download and install jq from ." && exit 1) + + test -n "$TEST" && set +x + + test -n "$GITHUB_TOKEN" || (echo "Error: GITHUB_TOKEN (repo scope) is required but not set." && exit 1) + + test -n "$TEST" && set -x +} + +get_pr_base_branch() { + # Get the base branch of a PR using GitHub API + test -n "$TEST" && set +x + + BASE_BRANCH=$(curl -fsSL -H "Authorization: token $GITHUB_TOKEN" \ + "https://api.github.com/repos/$REPO_OWNER/$REPO_NAME/pulls/$PR_NUMBER" | + jq -r '.base.ref') + + test -n "$TEST" && set -x + + # Ensure that BASE_BRANCH is not empty + test -n "$BASE_BRANCH" || (echo "Error: Cannot get BASE_BRANCH." && exit 1) + +} + +get_destination_suffix() { + # Determine the product name based on PREVIEW_PRODUCT. + case "$PREVIEW_PRODUCT" in + preview) + DIR_SUFFIX="tidb/${BASE_BRANCH}" + ;; + preview-cloud) + DIR_SUFFIX="tidbcloud/master" + IS_CLOUD=true + ;; + preview-operator) + DIR_SUFFIX="tidb-in-kubernetes/${BASE_BRANCH}" + ;; + *) + echo "Error: Branch name must start with preview/, preview-cloud/, or preview-operator/." + exit 1 + ;; + esac +} + +generate_sync_tasks() { + # Define sync tasks for different repositories. 
+ case "$REPO_NAME" in + docs) + # Sync all modified or added files from the root dir to markdown-pages/en/. + SYNC_TASKS=("./,en/") + ;; + docs-cn) + # sync all modified or added files from the root dir to markdown-pages/zh/. + SYNC_TASKS=("./,zh/") + ;; + docs-tidb-operator) + # Task 1: sync all modified or added files from en/ to markdown-pages/en/. + # Task 2: sync all modified or added files from zh/ to markdown-pages/zh/. + SYNC_TASKS=("en/,en/" "zh/,zh/") + ;; + *) + echo "Error: Invalid repo name. Only docs, docs-cn, and docs-tidb-operator are supported." + exit 1 + ;; + esac +} + +clone_repo() { + + # Clone repo if it doesn't exist already. + test -e "$REPO_DIR/.git" || git clone "https://github.com/$REPO_OWNER/$REPO_NAME.git" "$REPO_DIR" + # --update-head-ok: By default git fetch refuses to update the head which corresponds to the current branch. This flag disables the check. This is purely for the internal use for git pull to communicate with git fetch, and unless you are implementing your own Porcelain you are not supposed to use it. + # use --force to overwrite local branch when remote branch is force pushed. + git -C "$REPO_DIR" fetch origin "$BASE_BRANCH" # + git -C "$REPO_DIR" fetch origin pull/"$PR_NUMBER"/head:PR-"$PR_NUMBER" --update-head-ok --force + git -C "$REPO_DIR" checkout PR-"$PR_NUMBER" +} + +process_cloud_toc() { + DIR=$1 + mv "$DIR/TOC-tidb-cloud.md" "$DIR/TOC.md" +} + +perform_sync_task() { + generate_sync_tasks + # Perform sync tasks. + for TASK in "${SYNC_TASKS[@]}"; do + + SRC_DIR="$REPO_DIR/$(echo "$TASK" | cut -d',' -f1)" + DEST_DIR="markdown-pages/$(echo "$TASK" | cut -d',' -f2)/$DIR_SUFFIX" + mkdir -p "$DEST_DIR" + # Only sync modified or added files. 
+ git -C "$SRC_DIR" diff --merge-base --name-only --diff-filter=AMR origin/"$BASE_BRANCH" --relative | tee /dev/fd/2 | + rsync -av --files-from=- "$SRC_DIR" "$DEST_DIR" + + if [[ "$IS_CLOUD" && -f "$DEST_DIR/TOC-tidb-cloud.md" ]]; then + process_cloud_toc "$DEST_DIR" + fi + + done + +} + +commit_changes() { + # Exit if TEST is set and not empty. + test -n "$TEST" && echo "Test mode, exiting..." && exit 0 + # Handle untracked files. + git add . + # Commit changes, if any. + git commit -m "$COMMIT_MESS" || echo "No changes to commit" +} + +# Get the directory of this script. +SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &>/dev/null && pwd) +cd "$SCRIPT_DIR" + +check_prerequisites + +# If the branch name is not provided as an argument, use the current branch. +BRANCH_NAME=${1:-$(git branch --show-current)} + +# Extract product, repo owner, repo name, and PR number from the branch name. +PREVIEW_PRODUCT=$(echo "$BRANCH_NAME" | cut -d'/' -f1) +REPO_OWNER=$(echo "$BRANCH_NAME" | cut -d'/' -f2) +REPO_NAME=$(echo "$BRANCH_NAME" | cut -d'/' -f3) +PR_NUMBER=$(echo "$BRANCH_NAME" | cut -d'/' -f4) +REPO_DIR="temp/$REPO_NAME" + +get_pr_base_branch +get_destination_suffix +clone_repo +perform_sync_task + +# Get the current commit SHA +CURRENT_COMMIT=$(git -C "$REPO_DIR" rev-parse HEAD) +COMMIT_MESS="Preview PR https://github.com/$REPO_OWNER/$REPO_NAME/pull/$PR_NUMBER and this preview is triggered from commit https://github.com/$REPO_OWNER/$REPO_NAME/pull/$PR_NUMBER/commits/$CURRENT_COMMIT" + +commit_changes diff --git a/test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/TOC.md b/test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/TOC.md new file mode 100644 index 00000000..3e740e1a --- /dev/null +++ b/test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/TOC.md @@ -0,0 +1,420 @@ + + + +- [Docs Home](https://docs.pingcap.com/) +- About TiDB Cloud + - [Why TiDB Cloud](/tidb-cloud/tidb-cloud-intro.md) + - 
[Architecture](/tidb-cloud/tidb-cloud-intro.md#architecture) + - [High Availability](/tidb-cloud/high-availability-with-multi-az.md) + - [MySQL Compatibility](/mysql-compatibility.md) +- Get Started + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) + - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) + - [Perform a PoC](/tidb-cloud/tidb-cloud-poc.md) +- Develop Applications + - [Overview](/develop/dev-guide-overview.md) + - Quick Start + - [Build a TiDB Developer Cluster](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + - Build a Simple CRUD App with TiDB + - [Java](/develop/dev-guide-sample-application-java.md) + - [Golang](/develop/dev-guide-sample-application-golang.md) + - Example Applications + - [Build a TiDB Application using Spring Boot](/develop/dev-guide-sample-application-spring-boot.md) + - Connect to TiDB + - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - 
[Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Reference + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) + - Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - Cloud Native Development Environment + - [Gitpod](/develop/dev-guide-playground-gitpod.md) +- Manage Cluster + - Plan Your Cluster + - [Select Your Cluster Tier](/tidb-cloud/select-cluster-tier.md) + - [Determine Your TiDB Size](/tidb-cloud/size-your-cluster.md) + - [Create a TiDB 
Cluster](/tidb-cloud/create-tidb-cluster.md) + - Connect to Your TiDB Cluster + - [Connect via a SQL Client](/tidb-cloud/connect-to-tidb-cluster.md) + - [Connect via SQL Shell](/tidb-cloud/connect-to-tidb-cluster.md#connect-via-sql-shell) + - [Set Up VPC Peering Connections](/tidb-cloud/set-up-vpc-peering-connections.md) + - Use an HTAP Cluster with TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Read Data from TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [Scale a TiDB Cluster](/tidb-cloud/scale-tidb-cluster.md) + - [Upgrade a TiDB Cluster](/tidb-cloud/upgrade-tidb-cluster.md) + - [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md) + - [Use TiDB Cloud API (Beta)](/tidb-cloud/api-overview.md) +- Migrate Data + - [Import Sample Data](/tidb-cloud/import-sample-data.md) + - Migrate Data into TiDB + - [Configure Amazon S3 Access and GCS Access](/tidb-cloud/config-s3-and-gcs-access.md) + - [Migrate from MySQL-Compatible Databases](/tidb-cloud/migrate-data-into-tidb.md) + - [Migrate Incremental Data from MySQL-Compatible Databases](/tidb-cloud/migrate-incremental-data-from-mysql.md) + - [Migrate from Amazon Aurora MySQL in Bulk](/tidb-cloud/migrate-from-aurora-bulk-import.md) + - [Import or Migrate from Amazon S3 or GCS to TiDB Cloud](/tidb-cloud/migrate-from-amazon-s3-or-gcs.md) + - [Import CSV Files from Amazon S3 or GCS into TiDB Cloud](/tidb-cloud/import-csv-files.md) + - [Import Apache Parquet Files from Amazon S3 or GCS into TiDB Cloud](/tidb-cloud/import-parquet-files.md) + - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) + - [Export Data from TiDB](/tidb-cloud/export-data-from-tidb-cloud.md) 
+- Back Up and Restore + - [Automatic Backup](/tidb-cloud/backup-and-restore.md) + - [Manual Backup](/tidb-cloud/backup-and-restore.md#manual-backup) + - [Restore](/tidb-cloud/backup-and-restore.md#restore) +- Monitor and Alert + - [Overview](/tidb-cloud/monitor-tidb-cluster.md) + - [Built-in Monitoring](/tidb-cloud/built-in-monitoring.md) + - [Built-in Alerting](/tidb-cloud/monitor-built-in-alerting.md) + - Third-Party Monitoring Integrations + - [Datadog Integration](/tidb-cloud/monitor-datadog-integration.md) + - [Prometheus and Grafana Integration](/tidb-cloud/monitor-prometheus-and-grafana-integration.md) +- Tune Performance + - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) + - Analyze Performance + - [Statement Analysis](/tidb-cloud/tune-performance.md) + - [Key Visualizer](/tidb-cloud/tune-performance.md#key-visualizer) + - [Statement Summary Tables](/statement-summary-tables.md) + - SQL Tuning + - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - Physical Optimization + - 
[Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Prepare Execution Plan Cache](/sql-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [TiKV Follower Read](/follower-read.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - [Tune TiFlash performance](/tiflash/tune-tiflash-performance.md) +- Manage User Access + - [Manage Console User Access](/tidb-cloud/manage-user-access.md) + - [Configure Cluster Security Settings](/tidb-cloud/configure-security-settings.md) +- Billing + - [Node Cost](/tidb-cloud/tidb-cloud-billing.md) + - [Backup Storage Cost](/tidb-cloud/tidb-cloud-billing.md#backup-storage-cost) + - [Data Transfer Cost](/tidb-cloud/tidb-cloud-billing.md#data-transfer-cost) + - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) + - [Billing Details](/tidb-cloud/tidb-cloud-billing.md#billing-details) + - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) + - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) +- Reference + - TiDB Cluster Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - [TiDB Cloud Cluster Limits and Quotas](/tidb-cloud/limitations-and-quotas.md) + - [TiDB Limitations](/tidb-limitations.md) + - SQL + - [Explore SQL with TiDB](/basic-sql-operations.md) + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - 
[AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN SHOW DDL [JOBS|QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - 
[`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT `](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT `](/sql-statements/sql-statement-grant-role.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL 
[TIDB]`](/sql-statements/sql-statement-kill.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] `](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW 
ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) 
+ - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - [JSON Functions](/functions-and-operators/json-functions.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision 
Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `As OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - System Tables + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - 
[`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - 
[`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [System Variables](/system-variables.md) + - [API Reference](https://docs.pingcap.com/tidbcloud/api/v1beta) + - Storage Engines + - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Dumpling](/dumpling-overview.md) + - [Table Filter](/table-filter.md) + - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) +- [FAQs](/tidb-cloud/tidb-cloud-faq.md) +- Release Notes + - [2022](/tidb-cloud/release-notes-2022.md) + - [2021](/tidb-cloud/release-notes-2021.md) + - [2020](/tidb-cloud/release-notes-2020.md) +- [Support](/tidb-cloud/tidb-cloud-support.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) diff --git a/test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md b/test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md new file mode 100644 index 00000000..7416ae07 --- /dev/null +++ b/test/sync_pr_cloud/data/markdown-pages/en/tidbcloud/master/tidb-cloud/api-overview.md @@ -0,0 +1,36 @@ +--- +title: TiDB Cloud API Overview +summary: Learn about what is TiDB Cloud API, its features, and how to use API to manage your TiDB Cloud clusters. +--- + +# TiDB Cloud API Overview Beta + +> **Note:** +> +> TiDB Cloud API is still in beta and only available upon request. You can apply for API access by submitting a request: +> +> - Click **Help** in the lower-right corner of TiDB Cloud console. 
+> - In the dialog, fill in "Apply for TiDB Cloud API" in the **Description** field and click **Send**. +> +> You will receive an email notification when the API is available for you. + +The TiDB Cloud API is a [REST interface](https://en.wikipedia.org/wiki/Representational_state_transfer) that provides you with programmatic access to manage administrative objects within TiDB Cloud. Through this API, you can manage resources automatically and efficiently: + +* Projects +* Clusters +* Backups +* Restores + +The API has the following features: + +- **JSON entities.** All entities are expressed in JSON. +- **HTTPS-only.** You can only access the API via HTTPS, ensuring all the data sent over the network is encrypted with TLS. +- **Key-based access and digest authentication.** Before you access TiDB Cloud API, you must generate an API key. All requests are authenticated through [HTTP Digest Authentication](https://en.wikipedia.org/wiki/Digest_access_authentication), ensuring the API key is never sent over the network. 
+ +To start using TiDB Cloud API, refer to the following resources: + +- [Get Started](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Get-Started) +- [Authentication](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Authentication) +- [Rate Limiting](https://docs.pingcap.com/tidbcloud/api/v1beta#section/Rate-Limiting) +- [API Full References](https://docs.pingcap.com/tidbcloud/api/v1beta#tag/Project) +- [Changelog](https://docs.pingcap.com/tidbcloud/api/v1beta#section/API-Changelog) diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC-tidb-cloud.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC-tidb-cloud.md new file mode 100644 index 00000000..c070db03 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC-tidb-cloud.md @@ -0,0 +1,541 @@ + + + +- [Docs Home](https://docs.pingcap.com/) +- About TiDB Cloud + - [Why TiDB Cloud](/tidb-cloud/tidb-cloud-intro.md) + - [Architecture](/tidb-cloud/tidb-cloud-intro.md#architecture) + - [High Availability](/tidb-cloud/high-availability-with-multi-az.md) + - [MySQL Compatibility](/mysql-compatibility.md) + - [Roadmap](/tidb-cloud/tidb-cloud-roadmap.md) +- Get Started + - [Try Out TiDB Cloud](/tidb-cloud/tidb-cloud-quickstart.md) + - [Try Out HTAP](/tidb-cloud/tidb-cloud-htap-quickstart.md) + - [Try Out TiDB Cloud CLI](/tidb-cloud/get-started-with-cli.md) + - [Perform a PoC](/tidb-cloud/tidb-cloud-poc.md) +- Develop Applications + - [Overview](/develop/dev-guide-overview.md) + - Quick Start + - [Build a TiDB Cluster in TiDB Cloud (Serverless Tier)](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + - Connect to TiDB Cloud + - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - 
[Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - Third-Party Support + - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) + - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) + - Development Reference + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a Table](/develop/dev-guide-create-table.md) + - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Periodically Delete Expired Data Using TTL (Time to Live)](/time-to-live.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-Table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary 
Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - [FastScan](/develop/dev-guide-use-fastscan.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Development Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) +- Manage Cluster + - Plan Your Cluster + - [Select Your Cluster Tier](/tidb-cloud/select-cluster-tier.md) + - [Determine Your TiDB Size](/tidb-cloud/size-your-cluster.md) + - [TiDB Cloud Performance 
Reference](/tidb-cloud/tidb-cloud-performance-reference.md) + - [Create a TiDB Cluster](/tidb-cloud/create-tidb-cluster.md) + - Connect to Your TiDB Cluster + - [Connection Method Overview](/tidb-cloud/connect-to-tidb-cluster.md) + - [Connect via Standard Connection](/tidb-cloud/connect-via-standard-connection.md) + - [Connect via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections.md) + - [Connect via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) + - [Connect via SQL Shell](/tidb-cloud/connect-via-sql-shell.md) + - Use an HTAP Cluster with TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Read Data from TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [Scale a TiDB Cluster](/tidb-cloud/scale-tidb-cluster.md) + - [Pause or Resume a TiDB Cluster](/tidb-cloud/pause-or-resume-tidb-cluster.md) + - [Upgrade a TiDB Cluster](/tidb-cloud/upgrade-tidb-cluster.md) + - [Delete a TiDB Cluster](/tidb-cloud/delete-tidb-cluster.md) +- Migrate or Import Data + - [Overview](/tidb-cloud/tidb-cloud-migration-overview.md) + - Migrate Data into TiDB Cloud + - [Migrate from MySQL-Compatible Databases Using Data Migration](/tidb-cloud/migrate-from-mysql-using-data-migration.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/tidb-cloud/migrate-sql-shards.md) + - [Migrate from On-Premises TiDB to TiDB Cloud](/tidb-cloud/migrate-from-op-tidb.md) + - [Migrate from MySQL-Compatible Databases Using AWS DMS](/tidb-cloud/migrate-from-mysql-using-aws-dms.md) + - [Migrate from Amazon RDS for Oracle Using AWS DMS](/tidb-cloud/migrate-from-oracle-using-aws-dms.md) + - Import Data into TiDB Cloud + 
- [Import Local Files](/tidb-cloud/tidb-cloud-import-local-files.md) + - [Import Sample Data (SQL File)](/tidb-cloud/import-sample-data.md) + - [Import CSV Files from Amazon S3 or GCS](/tidb-cloud/import-csv-files.md) + - [Import Apache Parquet Files from Amazon S3 or GCS](/tidb-cloud/import-parquet-files.md) + - [Export Data from TiDB](/tidb-cloud/export-data-from-tidb-cloud.md) + - Reference + - [Configure Amazon S3 Access and GCS Access](/tidb-cloud/config-s3-and-gcs-access.md) + - [Naming Conventions for Data Import](/tidb-cloud/naming-conventions-for-data-import.md) + - [CSV Configurations for Importing Data](/tidb-cloud/csv-config-for-import-data.md) + - [Troubleshoot Access Denied Errors during Data Import from Amazon S3](/tidb-cloud/troubleshoot-import-access-denied-error.md) + - [Precheck Errors, Migration Errors, and Alerts for Data Migration](/tidb-cloud/tidb-cloud-dm-precheck-and-troubleshooting.md) +- Explore Data + - [Chat2Query (Beta)](/tidb-cloud/explore-data-with-chat2query.md) +- Data Service (Beta) + - [Overview](/tidb-cloud/data-service-overview.md) + - [Get Started](/tidb-cloud/data-service-get-started.md) + - [Try Out Chat2Query API](/tidb-cloud/use-chat2query-api.md) + - [Manage Data App](/tidb-cloud/data-service-manage-data-app.md) + - [Manage Endpoint](/tidb-cloud/data-service-manage-endpoint.md) + - [API Key](/tidb-cloud/data-service-api-key.md) + - [Response and Status Code](/tidb-cloud/data-service-response-and-status-code.md) +- Stream Data + - [Changefeed Overview](/tidb-cloud/changefeed-overview.md) + - [To MySQL Sink](/tidb-cloud/changefeed-sink-to-mysql.md) + - [To Kafka Sink](/tidb-cloud/changefeed-sink-to-apache-kafka.md) +- Back Up and Restore + - [Automatic Backup](/tidb-cloud/backup-and-restore.md) + - [Manual Backup](/tidb-cloud/backup-and-restore.md#manual-backup) + - [Restore](/tidb-cloud/backup-and-restore.md#restore) +- Monitor and Alert + - [Overview](/tidb-cloud/monitor-tidb-cluster.md) + - [Built-in 
Monitoring](/tidb-cloud/built-in-monitoring.md) + - [Built-in Alerting](/tidb-cloud/monitor-built-in-alerting.md) + - [Cluster Events](/tidb-cloud/tidb-cloud-events.md)  + - [Third-Party Monitoring Integrations](/tidb-cloud/third-party-monitoring-integrations.md) +- Tune Performance + - [Overview](/tidb-cloud/tidb-cloud-tune-performance-overview.md) + - Analyze Performance + - [Use the Diagnosis Tab](/tidb-cloud/tune-performance.md) + - [Use Statement Summary Tables](/statement-summary-tables.md) + - SQL Tuning + - [Overview](/tidb-cloud/tidb-cloud-sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Prepare Execution Plan Cache](/sql-prepared-plan-cache.md) + - Control 
Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) + - [TiKV Follower Read](/follower-read.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) +- Security + - Identity Access Control + - [Password Authentication](/tidb-cloud/tidb-cloud-password-authentication.md) + - [SSO Authentication](/tidb-cloud/tidb-cloud-sso-authentication.md) + - [Identity Access Management](/tidb-cloud/manage-user-access.md) + - Network Access Control + - [Configure an IP Access List](/tidb-cloud/configure-ip-access-list.md) + - [Connect via Private Endpoint](/tidb-cloud/set-up-private-endpoint-connections.md) + - [Connect via VPC Peering](/tidb-cloud/set-up-vpc-peering-connections.md) + - [TLS Connections to Serverless Tier](/tidb-cloud/secure-connections-to-serverless-tier-clusters.md) + - [TLS Connections to Dedicated Tier](/tidb-cloud/tidb-cloud-tls-connect-to-dedicated-tier.md) + - Database Access Control + - [Configure Cluster Security Settings](/tidb-cloud/configure-security-settings.md) + - Audit Management + - [Database Audit Logging](/tidb-cloud/tidb-cloud-auditing.md) + - [Console Audit Logging](/tidb-cloud/tidb-cloud-console-auditing.md) +- Billing + - [Invoices](/tidb-cloud/tidb-cloud-billing.md#invoices) + - [Billing Details](/tidb-cloud/tidb-cloud-billing.md#billing-details) + - [Credits](/tidb-cloud/tidb-cloud-billing.md#credits) + - [Payment Method Setting](/tidb-cloud/tidb-cloud-billing.md#payment-method) + - [Billing from AWS or GCP Marketplace](/tidb-cloud/tidb-cloud-billing.md#billing-from-aws-marketplace-or-google-cloud-marketplace) + - [Billing for 
Changefeed](/tidb-cloud/tidb-cloud-billing-ticdc-rcu.md) + - [Billing for Data Migration](/tidb-cloud/tidb-cloud-billing-dm.md) +- API + - [API Overview](/tidb-cloud/api-overview.md) + - [API Reference](https://docs.pingcap.com/tidbcloud/api/v1beta) +- Integrations + - [Airbyte](/tidb-cloud/integrate-tidbcloud-with-airbyte.md) + - [Amazon AppFlow](/develop/dev-guide-aws-appflow-integration.md) + - [Cloudflare](/tidb-cloud/integrate-tidbcloud-with-cloudflare.md) + - [Datadog](/tidb-cloud/monitor-datadog-integration.md) + - [dbt](/tidb-cloud/integrate-tidbcloud-with-dbt.md) + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - [n8n](/tidb-cloud/integrate-tidbcloud-with-n8n.md) + - [Netlify](/tidb-cloud/integrate-tidbcloud-with-netlify.md) + - [Prometheus and Grafana](/tidb-cloud/monitor-prometheus-and-grafana-integration.md) + - [ProxySQL](/develop/dev-guide-proxysql-integration.md) + - Terraform + - [Terraform Integration Overview](/tidb-cloud/terraform-tidbcloud-provider-overview.md) + - [Get TiDB Cloud Terraform Provider](/tidb-cloud/terraform-get-tidbcloud-provider.md) + - [Use Cluster Resource](/tidb-cloud/terraform-use-cluster-resource.md) + - [Use Backup Resource](/tidb-cloud/terraform-use-backup-resource.md) + - [Use Restore Resource](/tidb-cloud/terraform-use-restore-resource.md) + - [Use Import Resource](/tidb-cloud/terraform-use-import-resource.md) + - [Vercel](/tidb-cloud/integrate-tidbcloud-with-vercel.md) + - [Zapier](/tidb-cloud/integrate-tidbcloud-with-zapier.md) +- Reference + - TiDB Cluster Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - [Dedicated Tier Limitations and Quotas](/tidb-cloud/limitations-and-quotas.md) + - [Serverless Tier Limitations](/tidb-cloud/serverless-tier-limitations.md) + - [TiDB Limitations](/tidb-limitations.md) + - SQL + - [Explore SQL with TiDB](/basic-sql-operations.md) + - SQL Language Structure and Syntax 
+ - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN SHOW DDL [JOBS|JOB QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - 
[`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH 
TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT `](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT `](/sql-statements/sql-statement-grant-role.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE `](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] `](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS 
FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE 
NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and 
Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - [JSON Functions](/functions-and-operators/json-functions.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `AS OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read 
Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - System Tables + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - 
[`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [Metadata Lock](/metadata-lock.md) + - [Use UUIDs](/best-practices/uuid.md) + - [System Variables](/system-variables.md) + - Storage Engines + - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - TiFlash + - [TiFlash Overview](/tiflash/tiflash-overview.md) + - CLI + - [Overview](/tidb-cloud/cli-reference.md) + - cluster + - [create](/tidb-cloud/ticloud-cluster-create.md) + - 
[delete](/tidb-cloud/ticloud-cluster-delete.md) + - [describe](/tidb-cloud/ticloud-cluster-describe.md) + - [list](/tidb-cloud/ticloud-cluster-list.md) + - [connect-info](/tidb-cloud/ticloud-cluster-connect-info.md) + - config + - [create](/tidb-cloud/ticloud-config-create.md) + - [delete](/tidb-cloud/ticloud-config-delete.md) + - [describe](/tidb-cloud/ticloud-config-describe.md) + - [edit](/tidb-cloud/ticloud-config-edit.md) + - [list](/tidb-cloud/ticloud-config-list.md) + - [set](/tidb-cloud/ticloud-config-set.md) + - [use](/tidb-cloud/ticloud-config-use.md) + - [connect](/tidb-cloud/ticloud-connect.md) + - import + - [cancel](/tidb-cloud/ticloud-import-cancel.md) + - [describe](/tidb-cloud/ticloud-import-describe.md) + - [list](/tidb-cloud/ticloud-import-list.md) + - start + - [local](/tidb-cloud/ticloud-import-start-local.md) + - [s3](/tidb-cloud/ticloud-import-start-s3.md) + - project + - [list](/tidb-cloud/ticloud-project-list.md) + - [update](/tidb-cloud/ticloud-update.md) + - [Dumpling](/dumpling-overview.md) + - [Table Filter](/table-filter.md) + - [Troubleshoot Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) +- FAQs + - [TiDB Cloud FAQs](/tidb-cloud/tidb-cloud-faq.md) + - [Serverless Tier FAQs](/tidb-cloud/serverless-tier-faqs.md) +- Release Notes + - [2023](/tidb-cloud/tidb-cloud-release-notes.md) + - [2022](/tidb-cloud/release-notes-2022.md) + - [2021](/tidb-cloud/release-notes-2021.md) + - [2020](/tidb-cloud/release-notes-2020.md) +- [Support](/tidb-cloud/tidb-cloud-support.md) +- [Glossary](/tidb-cloud/tidb-cloud-glossary.md) diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC.md new file mode 100644 index 00000000..247903ec --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/TOC.md @@ -0,0 +1,1123 @@ + + + +- [Docs Home](https://docs.pingcap.com/) +- About TiDB + - [TiDB Introduction](/overview.md) + 
 - [TiDB 6.5 Release Notes](/releases/release-6.5.0.md) + - [Features](/basic-features.md) + - [MySQL Compatibility](/mysql-compatibility.md) + - [TiDB Limitations](/tidb-limitations.md) + - [Credits](/credits.md) +- Quick Start + - [Try Out TiDB](/quick-start-with-tidb.md) + - [Try Out HTAP](/quick-start-with-htap.md) + - [Learn TiDB SQL](/basic-sql-operations.md) + - [Learn HTAP](/explore-htap.md) + - [Import Example Database](/import-example-data.md) +- Develop + - [Overview](/develop/dev-guide-overview.md) + - Quick Start + - [Build a TiDB Cluster in TiDB Cloud (Serverless Tier)](/develop/dev-guide-build-cluster-in-cloud.md) + - [CRUD SQL in TiDB](/develop/dev-guide-tidb-crud-sql.md) + - Example Applications + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - Connect to TiDB + - [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) + - [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) + - [Connection Pools and Connection Parameters](/develop/dev-guide-connection-parameters.md) + - Design Database Schema + - [Overview](/develop/dev-guide-schema-design-overview.md) + - [Create a Database](/develop/dev-guide-create-database.md) + - [Create a 
Table](/develop/dev-guide-create-table.md) + - [Create a Secondary Index](/develop/dev-guide-create-secondary-indexes.md) + - Write Data + - [Insert Data](/develop/dev-guide-insert-data.md) + - [Update Data](/develop/dev-guide-update-data.md) + - [Delete Data](/develop/dev-guide-delete-data.md) + - [Periodically Delete Data Using Time to Live](/time-to-live.md) + - [Prepared Statements](/develop/dev-guide-prepared-statement.md) + - Read Data + - [Query Data from a Single Table](/develop/dev-guide-get-data-from-single-table.md) + - [Multi-table Join Queries](/develop/dev-guide-join-tables.md) + - [Subquery](/develop/dev-guide-use-subqueries.md) + - [Paginate Results](/develop/dev-guide-paginate-results.md) + - [Views](/develop/dev-guide-use-views.md) + - [Temporary Tables](/develop/dev-guide-use-temporary-tables.md) + - [Common Table Expression](/develop/dev-guide-use-common-table-expression.md) + - Read Replica Data + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP Queries](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - [FastScan](/develop/dev-guide-use-fastscan.md) + - Transaction + - [Overview](/develop/dev-guide-transaction-overview.md) + - [Optimistic and Pessimistic Transactions](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [Transaction Restraints](/develop/dev-guide-transaction-restraints.md) + - [Handle Transaction Errors](/develop/dev-guide-transaction-troubleshoot.md) + - Optimize + - [Overview](/develop/dev-guide-optimize-sql-overview.md) + - [SQL Performance Tuning](/develop/dev-guide-optimize-sql.md) + - [Best Practices for Performance Tuning](/develop/dev-guide-optimize-sql-best-practices.md) + - [Best Practices for Indexing](/develop/dev-guide-index-best-practice.md) + - Other Optimization Methods + - [Avoid Implicit Type Conversions](/develop/dev-guide-implicit-type-conversion.md) + - [Unique Serial Number 
Generation](/develop/dev-guide-unique-serial-number-generation.md) + - Troubleshoot + - [SQL or Transaction Issues](/develop/dev-guide-troubleshoot-overview.md) + - [Unstable Result Set](/develop/dev-guide-unstable-result-set.md) + - [Timeouts](/develop/dev-guide-timeouts-in-tidb.md) + - Reference + - [Bookshop Example Application](/develop/dev-guide-bookshop-schema-design.md) + - Guidelines + - [Object Naming Convention](/develop/dev-guide-object-naming-guidelines.md) + - [SQL Development Specifications](/develop/dev-guide-sql-development-specification.md) + - Legacy Docs + - [For Django](/develop/dev-guide-outdated-for-django.md) + - Cloud Native Development Environment + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - Third-Party Support + - [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md) + - [Known Incompatibility Issues with Third-Party Tools](/develop/dev-guide-third-party-tools-compatibility.md) + - [ProxySQL Integration Guide](/develop/dev-guide-proxysql-integration.md) + - [Amazon AppFlow Integration Guide](/develop/dev-guide-aws-appflow-integration.md) +- Deploy + - [Software and Hardware Requirements](/hardware-and-software-requirements.md) + - [Environment Configuration Checklist](/check-before-deployment.md) + - Plan Cluster Topology + - [Minimal Topology](/minimal-deployment-topology.md) + - [TiFlash Topology](/tiflash-deployment-topology.md) + - [TiCDC Topology](/ticdc-deployment-topology.md) + - [TiDB Binlog Topology](/tidb-binlog-deployment-topology.md) + - [TiSpark Topology](/tispark-deployment-topology.md) + - [Cross-DC Topology](/geo-distributed-deployment-topology.md) + - [Hybrid Topology](/hybrid-deployment-topology.md) + - Install and Start + - [Use TiUP](/production-deployment-using-tiup.md) + - [Deploy on Kubernetes](/tidb-in-kubernetes.md) + - [Verify Cluster Status](/post-installation-check.md) + - Test Cluster Performance + - [Test TiDB Using Sysbench](/benchmark/benchmark-tidb-using-sysbench.md) 
+ - [Test TiDB Using TPC-C](/benchmark/benchmark-tidb-using-tpcc.md) + - [Test TiDB Using CH-benCHmark](/benchmark/benchmark-tidb-using-ch.md) +- Migrate + - [Overview](/migration-overview.md) + - [Migration Tools](/migration-tools.md) + - Migration Scenarios + - [Migrate from Aurora](/migrate-aurora-to-tidb.md) + - [Migrate MySQL of Small Datasets](/migrate-small-mysql-to-tidb.md) + - [Migrate MySQL of Large Datasets](/migrate-large-mysql-to-tidb.md) + - [Migrate and Merge MySQL Shards of Small Datasets](/migrate-small-mysql-shards-to-tidb.md) + - [Migrate and Merge MySQL Shards of Large Datasets](/migrate-large-mysql-shards-to-tidb.md) + - [Migrate from CSV Files](/migrate-from-csv-files-to-tidb.md) + - [Migrate from SQL Files](/migrate-from-sql-files-to-tidb.md) + - [Migrate from One TiDB Cluster to Another TiDB Cluster](/migrate-from-tidb-to-tidb.md) + - [Migrate from TiDB to MySQL-compatible Databases](/migrate-from-tidb-to-mysql.md) + - Advanced Migration + - [Continuous Replication with gh-ost or pt-osc](/migrate-with-pt-ghost.md) + - [Migrate to a Downstream Table with More Columns](/migrate-with-more-columns-downstream.md) + - [Filter Binlog Events](/filter-binlog-event.md) + - [Filter DML Events Using SQL Expressions](/filter-dml-event.md) +- Integrate + - [Overview](/integration-overview.md) + - Integration Scenarios + - [Integrate with Confluent and Snowflake](/ticdc/integrate-confluent-using-ticdc.md) + - [Integrate with Apache Kafka and Apache Flink](/replicate-data-to-kafka.md) +- Maintain + - Upgrade + - [Use TiUP](/upgrade-tidb-using-tiup.md) + - [Use TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/upgrade-a-tidb-cluster) + - [TiFlash v6.2.0 Upgrade Guide](/tiflash-620-upgrade-guide.md) + - Scale + - [Use TiUP (Recommended)](/scale-tidb-using-tiup.md) + - [Use TiDB Operator](https://docs.pingcap.com/tidb-in-kubernetes/stable/scale-a-tidb-cluster) + - Backup and Restore + - [Overview](/br/backup-and-restore-overview.md) + - 
Architecture + - [Architecture Overview](/br/backup-and-restore-design.md) + - [Snapshot Backup and Restore Architecture](/br/br-snapshot-architecture.md) + - [Log Backup and PITR Architecture](/br/br-log-architecture.md) + - Use BR + - [Use Overview](/br/br-use-overview.md) + - [Snapshot Backup and Restore Guide](/br/br-snapshot-guide.md) + - [Log Backup and PITR Guide](/br/br-pitr-guide.md) + - [Use Cases](/br/backup-and-restore-use-cases.md) + - [Backup Storages](/br/backup-and-restore-storages.md) + - BR CLI Manuals + - [Overview](/br/use-br-command-line-tool.md) + - [Snapshot Backup and Restore Command Manual](/br/br-snapshot-manual.md) + - [Log Backup and PITR Command Manual](/br/br-pitr-manual.md) + - References + - BR Features + - [Backup Auto-Tune](/br/br-auto-tune.md) + - [Batch Create Table](/br/br-batch-create-table.md) + - [Checkpoint Backup](/br/br-checkpoint.md) + - [Back up and Restore Data Using Dumpling and TiDB Lightning](/backup-and-restore-using-dumpling-lightning.md) + - [Back Up and Restore RawKV](/br/rawkv-backup-and-restore.md) + - [Incremental Backup and Restore](/br/br-incremental-guide.md) + - Cluster Disaster Recovery (DR) + - [DR Solutions Overview](/dr-solution-introduction.md) + - [Primary-Secondary DR](/dr-secondary-cluster.md) + - [Multi-Replica Cluster DR](/dr-multi-replica.md) + - [BR-based DR](/dr-backup-restore.md) + - [Configure Time Zone](/configure-time-zone.md) + - [Daily Checklist](/daily-check.md) + - [Maintain TiFlash](/tiflash/maintain-tiflash.md) + - [Maintain TiDB Using TiUP](/maintain-tidb-using-tiup.md) + - [Modify Configuration Dynamically](/dynamic-config.md) + - [Online Unsafe Recovery](/online-unsafe-recovery.md) + - [Replicate Data Between Primary and Secondary Clusters](/replicate-between-primary-and-secondary-clusters.md) +- Monitor and Alert + - [Monitoring Framework Overview](/tidb-monitoring-framework.md) + - [Monitoring API](/tidb-monitoring-api.md) + - [Deploy Monitoring 
Services](/deploy-monitoring-services.md) + - [Export Grafana Snapshots](/exporting-grafana-snapshots.md) + - [TiDB Cluster Alert Rules](/alert-rules.md) + - [TiFlash Alert Rules](/tiflash/tiflash-alert-rules.md) + - [Customize Configurations of Monitoring Servers](/tiup/customized-montior-in-tiup-environment.md) + - [BR Monitoring and Alert](/br/br-monitoring-and-alert.md) +- Troubleshoot + - Issue Summary + - [TiDB Troubleshooting Map](/tidb-troubleshooting-map.md) + - [Troubleshoot TiDB Cluster Setup](/troubleshoot-tidb-cluster.md) + - [Troubleshoot TiFlash](/tiflash/troubleshoot-tiflash.md) + - Issue Scenarios + - Slow Queries + - [Identify Slow Queries](/identify-slow-queries.md) + - [Analyze Slow Queries](/analyze-slow-queries.md) + - [TiDB OOM](/troubleshoot-tidb-oom.md) + - [Hotspot](/troubleshoot-hot-spot-issues.md) + - [Increased Read and Write Latency](/troubleshoot-cpu-issues.md) + - [Write Conflicts in Optimistic Transactions](/troubleshoot-write-conflicts.md) + - [High Disk I/O Usage](/troubleshoot-high-disk-io.md) + - [Lock Conflicts](/troubleshoot-lock-conflicts.md) + - [Inconsistency Between Data and Indexes](/troubleshoot-data-inconsistency-errors.md) + - Diagnostic Methods + - [SQL Diagnostics](/information-schema/information-schema-sql-diagnostics.md) + - [Statement Summary Tables](/statement-summary-tables.md) + - [Identify Expensive Queries Using Top SQL](/dashboard/top-sql.md) + - [Identify Expensive Queries Using Logs](/identify-expensive-queries.md) + - [Save and Restore the On-Site Information of a Cluster](/sql-plan-replayer.md) + - [Support Resources](/support.md) +- Performance Tuning + - Tuning Guide + - [Performance Tuning Overview](/performance-tuning-overview.md) + - [Performance Analysis and Tuning](/performance-tuning-methods.md) + - [Performance Tuning Practices for OLTP Scenarios](/performance-tuning-practices.md) + - [Latency Breakdown](/latency-breakdown.md) + - Configuration Tuning + - [Tune Operating System 
Performance](/tune-operating-system.md) + - [Tune TiDB Memory](/configure-memory-usage.md) + - [Tune TiKV Threads](/tune-tikv-thread-performance.md) + - [Tune TiKV Memory](/tune-tikv-memory-performance.md) + - [TiKV Follower Read](/follower-read.md) + - [Tune Region Performance](/tune-region-performance.md) + - [Tune TiFlash Performance](/tiflash/tune-tiflash-performance.md) + - [Coprocessor Cache](/coprocessor-cache.md) + - Garbage Collection (GC) + - [Overview](/garbage-collection-overview.md) + - [Configuration](/garbage-collection-configuration.md) + - SQL Tuning + - [Overview](/sql-tuning-overview.md) + - Understanding the Query Execution Plan + - [Overview](/explain-overview.md) + - [`EXPLAIN` Walkthrough](/explain-walkthrough.md) + - [Indexes](/explain-indexes.md) + - [Joins](/explain-joins.md) + - [MPP Queries](/explain-mpp.md) + - [Subqueries](/explain-subqueries.md) + - [Aggregation](/explain-aggregation.md) + - [Views](/explain-views.md) + - [Partitions](/explain-partitions.md) + - [Index Merge](/explain-index-merge.md) + - SQL Optimization Process + - [Overview](/sql-optimization-concepts.md) + - Logic Optimization + - [Overview](/sql-logical-optimization.md) + - [Subquery Related Optimizations](/subquery-optimization.md) + - [Column Pruning](/column-pruning.md) + - [Decorrelation of Correlated Subquery](/correlated-subquery-optimization.md) + - [Eliminate Max/Min](/max-min-eliminate.md) + - [Predicates Push Down](/predicate-push-down.md) + - [Partition Pruning](/partition-pruning.md) + - [TopN and Limit Push Down](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - Physical Optimization + - [Overview](/sql-physical-optimization.md) + - [Index Selection](/choose-index.md) + - [Statistics](/statistics.md) + - [Extended Statistics](/extended-statistics.md) + - [Wrong Index Solution](/wrong-index-solution.md) + - [Distinct Optimization](/agg-distinct-optimization.md) + - [Cost Model](/cost-model.md) + - [Prepare Execution Plan 
Cache](/sql-prepared-plan-cache.md) + - Control Execution Plans + - [Overview](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [SQL Plan Management](/sql-plan-management.md) + - [The Blocklist of Optimization Rules and Expression Pushdown](/blocklist-control-plan.md) +- Tutorials + - [Multiple Availability Zones in One Region Deployment](/multi-data-centers-in-one-city-deployment.md) + - [Three Availability Zones in Two Regions Deployment](/three-data-centers-in-two-cities-deployment.md) + - [Two Availability Zones in One Region Deployment](/two-data-centers-in-one-city-deployment.md) + - Read Historical Data + - Use Stale Read (Recommended) + - [Usage Scenarios of Stale Read](/stale-read.md) + - [Perform Stale Read Using `AS OF TIMESTAMP`](/as-of-timestamp.md) + - [Perform Stale Read Using `tidb_read_staleness`](/tidb-read-staleness.md) + - [Perform Stale Read Using `tidb_external_ts`](/tidb-external-ts.md) + - [Use the `tidb_snapshot` System Variable](/read-historical-data.md) + - Best Practices + - [Use TiDB](/best-practices/tidb-best-practices.md) + - [Java Application Development](/best-practices/java-app-best-practices.md) + - [Use HAProxy](/best-practices/haproxy-best-practices.md) + - [Highly Concurrent Write](/best-practices/high-concurrency-best-practices.md) + - [Grafana Monitoring](/best-practices/grafana-monitor-best-practices.md) + - [PD Scheduling](/best-practices/pd-scheduling-best-practices.md) + - [TiKV Performance Tuning with Massive Regions](/best-practices/massive-regions-best-practices.md) + - [Three-node Hybrid Deployment](/best-practices/three-nodes-hybrid-deployment.md) + - [Local Read Under Three Data Centers Deployment](/best-practices/three-dc-local-read.md) + - [Use UUIDs](/best-practices/uuid.md) + - [Use Placement Rules](/configure-placement-rules.md) + - [Use Load Base Split](/configure-load-base-split.md) + - [Use Store Limit](/configure-store-limit.md) + - [DDL Execution Principles and Best 
Practices](/ddl-introduction.md) +- TiDB Tools + - [Overview](/ecosystem-tool-user-guide.md) + - [Use Cases](/ecosystem-tool-user-case.md) + - [Download](/download-ecosystem-tools.md) + - TiUP + - [Documentation Map](/tiup/tiup-documentation-guide.md) + - [Overview](/tiup/tiup-overview.md) + - [Terminology and Concepts](/tiup/tiup-terminology-and-concepts.md) + - [Manage TiUP Components](/tiup/tiup-component-management.md) + - [FAQ](/tiup/tiup-faq.md) + - [Troubleshooting Guide](/tiup/tiup-troubleshooting-guide.md) + - Command Reference + - [Overview](/tiup/tiup-reference.md) + - TiUP Commands + - [tiup clean](/tiup/tiup-command-clean.md) + - [tiup completion](/tiup/tiup-command-completion.md) + - [tiup env](/tiup/tiup-command-env.md) + - [tiup help](/tiup/tiup-command-help.md) + - [tiup install](/tiup/tiup-command-install.md) + - [tiup list](/tiup/tiup-command-list.md) + - tiup mirror + - [Overview](/tiup/tiup-command-mirror.md) + - [tiup mirror clone](/tiup/tiup-command-mirror-clone.md) + - [tiup mirror genkey](/tiup/tiup-command-mirror-genkey.md) + - [tiup mirror grant](/tiup/tiup-command-mirror-grant.md) + - [tiup mirror init](/tiup/tiup-command-mirror-init.md) + - [tiup mirror merge](/tiup/tiup-command-mirror-merge.md) + - [tiup mirror modify](/tiup/tiup-command-mirror-modify.md) + - [tiup mirror publish](/tiup/tiup-command-mirror-publish.md) + - [tiup mirror rotate](/tiup/tiup-command-mirror-rotate.md) + - [tiup mirror set](/tiup/tiup-command-mirror-set.md) + - [tiup mirror sign](/tiup/tiup-command-mirror-sign.md) + - [tiup status](/tiup/tiup-command-status.md) + - [tiup telemetry](/tiup/tiup-command-telemetry.md) + - [tiup uninstall](/tiup/tiup-command-uninstall.md) + - [tiup update](/tiup/tiup-command-update.md) + - TiUP Cluster Commands + - [Overview](/tiup/tiup-component-cluster.md) + - [tiup cluster audit](/tiup/tiup-component-cluster-audit.md) + - [tiup cluster audit cleanup](/tiup/tiup-component-cluster-audit-cleanup.md) + - [tiup cluster 
check](/tiup/tiup-component-cluster-check.md) + - [tiup cluster clean](/tiup/tiup-component-cluster-clean.md) + - [tiup cluster deploy](/tiup/tiup-component-cluster-deploy.md) + - [tiup cluster destroy](/tiup/tiup-component-cluster-destroy.md) + - [tiup cluster disable](/tiup/tiup-component-cluster-disable.md) + - [tiup cluster display](/tiup/tiup-component-cluster-display.md) + - [tiup cluster edit-config](/tiup/tiup-component-cluster-edit-config.md) + - [tiup cluster enable](/tiup/tiup-component-cluster-enable.md) + - [tiup cluster help](/tiup/tiup-component-cluster-help.md) + - [tiup cluster import](/tiup/tiup-component-cluster-import.md) + - [tiup cluster list](/tiup/tiup-component-cluster-list.md) + - [tiup cluster meta backup](/tiup/tiup-component-cluster-meta-backup.md) + - [tiup cluster meta restore](/tiup/tiup-component-cluster-meta-restore.md) + - [tiup cluster patch](/tiup/tiup-component-cluster-patch.md) + - [tiup cluster prune](/tiup/tiup-component-cluster-prune.md) + - [tiup cluster reload](/tiup/tiup-component-cluster-reload.md) + - [tiup cluster rename](/tiup/tiup-component-cluster-rename.md) + - [tiup cluster replay](/tiup/tiup-component-cluster-replay.md) + - [tiup cluster restart](/tiup/tiup-component-cluster-restart.md) + - [tiup cluster scale-in](/tiup/tiup-component-cluster-scale-in.md) + - [tiup cluster scale-out](/tiup/tiup-component-cluster-scale-out.md) + - [tiup cluster start](/tiup/tiup-component-cluster-start.md) + - [tiup cluster stop](/tiup/tiup-component-cluster-stop.md) + - [tiup cluster template](/tiup/tiup-component-cluster-template.md) + - [tiup cluster upgrade](/tiup/tiup-component-cluster-upgrade.md) + - TiUP DM Commands + - [Overview](/tiup/tiup-component-dm.md) + - [tiup dm audit](/tiup/tiup-component-dm-audit.md) + - [tiup dm deploy](/tiup/tiup-component-dm-deploy.md) + - [tiup dm destroy](/tiup/tiup-component-dm-destroy.md) + - [tiup dm disable](/tiup/tiup-component-dm-disable.md) + - [tiup dm 
display](/tiup/tiup-component-dm-display.md) + - [tiup dm edit-config](/tiup/tiup-component-dm-edit-config.md) + - [tiup dm enable](/tiup/tiup-component-dm-enable.md) + - [tiup dm help](/tiup/tiup-component-dm-help.md) + - [tiup dm import](/tiup/tiup-component-dm-import.md) + - [tiup dm list](/tiup/tiup-component-dm-list.md) + - [tiup dm patch](/tiup/tiup-component-dm-patch.md) + - [tiup dm prune](/tiup/tiup-component-dm-prune.md) + - [tiup dm reload](/tiup/tiup-component-dm-reload.md) + - [tiup dm replay](/tiup/tiup-component-dm-replay.md) + - [tiup dm restart](/tiup/tiup-component-dm-restart.md) + - [tiup dm scale-in](/tiup/tiup-component-dm-scale-in.md) + - [tiup dm scale-out](/tiup/tiup-component-dm-scale-out.md) + - [tiup dm start](/tiup/tiup-component-dm-start.md) + - [tiup dm stop](/tiup/tiup-component-dm-stop.md) + - [tiup dm template](/tiup/tiup-component-dm-template.md) + - [tiup dm upgrade](/tiup/tiup-component-dm-upgrade.md) + - [TiDB Cluster Topology Reference](/tiup/tiup-cluster-topology-reference.md) + - [DM Cluster Topology Reference](/tiup/tiup-dm-topology-reference.md) + - [Mirror Reference Guide](/tiup/tiup-mirror-reference.md) + - TiUP Components + - [tiup-playground](/tiup/tiup-playground.md) + - [tiup-cluster](/tiup/tiup-cluster.md) + - [tiup-mirror](/tiup/tiup-mirror.md) + - [tiup-bench](/tiup/tiup-bench.md) + - PingCAP Clinic Diagnostic Service + - [Overview](/clinic/clinic-introduction.md) + - [Quick Start](/clinic/quick-start-with-clinic.md) + - [Troubleshoot Clusters Using PingCAP Clinic](/clinic/clinic-user-guide-for-tiup.md) + - [PingCAP Clinic Diagnostic Data](/clinic/clinic-data-instruction-for-tiup.md) + - [TiDB Operator](/tidb-operator-overview.md) + - [Dumpling](/dumpling-overview.md) + - TiDB Lightning + - [Overview](/tidb-lightning/tidb-lightning-overview.md) + - [Get Started](/get-started-with-tidb-lightning.md) + - Prechecks and requirements + - [Prechecks](/tidb-lightning/tidb-lightning-prechecks.md) + - [Target Database 
Requirements](/tidb-lightning/tidb-lightning-requirements.md) + - Data Sources + - [Data Match Rules](/tidb-lightning/tidb-lightning-data-source.md) + - [CSV](/tidb-lightning/tidb-lightning-data-source.md#csv) + - [SQL](/tidb-lightning/tidb-lightning-data-source.md#sql) + - [Parquet](/tidb-lightning/tidb-lightning-data-source.md#parquet) + - [Customized File](/tidb-lightning/tidb-lightning-data-source.md#match-customized-files) + - Physical Import Mode + - [Requirements and Limitations](/tidb-lightning/tidb-lightning-physical-import-mode.md) + - [Use Physical Import Mode](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md) + - Logical Import Mode + - [Requirements and Limitations](/tidb-lightning/tidb-lightning-logical-import-mode.md) + - [Use Logical Import Mode](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md) + - Key Features + - [Checkpoints](/tidb-lightning/tidb-lightning-checkpoints.md) + - [Table Filter](/table-filter.md) + - [Import Data in Parallel](/tidb-lightning/tidb-lightning-distributed-import.md) + - [Error Resolution](/tidb-lightning/tidb-lightning-error-resolution.md) + - [Web Interface](/tidb-lightning/tidb-lightning-web-interface.md) + - [Deploy](/tidb-lightning/deploy-tidb-lightning.md) + - [Troubleshooting](/tidb-lightning/troubleshoot-tidb-lightning.md) + - Reference + - [Configuration File](/tidb-lightning/tidb-lightning-configuration.md) + - [Command Line Flags](/tidb-lightning/tidb-lightning-command-line-full.md) + - [Monitor](/tidb-lightning/monitor-tidb-lightning.md) + - [FAQ](/tidb-lightning/tidb-lightning-faq.md) + - [Glossary](/tidb-lightning/tidb-lightning-glossary.md) + - TiDB Data Migration + - [About TiDB Data Migration](/dm/dm-overview.md) + - [Architecture](/dm/dm-arch.md) + - [Quick Start](/dm/quick-start-with-dm.md) + - [Best Practices](/dm/dm-best-practices.md) + - Deploy a DM cluster + - [Hardware and Software Requirements](/dm/dm-hardware-and-software-requirements.md) + - [Use TiUP 
(Recommended)](/dm/deploy-a-dm-cluster-using-tiup.md) + - [Use TiUP Offline](/dm/deploy-a-dm-cluster-using-tiup-offline.md) + - [Use Binary](/dm/deploy-a-dm-cluster-using-binary.md) + - [Use Kubernetes](https://docs.pingcap.com/tidb-in-kubernetes/dev/deploy-tidb-dm) + - Tutorials + - [Create a Data Source](/dm/quick-start-create-source.md) + - [Manage Data Sources](/dm/dm-manage-source.md) + - [Configure Tasks](/dm/dm-task-configuration-guide.md) + - [Shard Merge](/dm/dm-shard-merge.md) + - [Table Routing](/dm/dm-table-routing.md) + - [Block and Allow Lists](/dm/dm-block-allow-table-lists.md) + - [Binlog Event Filter](/dm/dm-binlog-event-filter.md) + - [Filter DMLs Using SQL Expressions](/dm/feature-expression-filter.md) + - [Online DDL Tool Support](/dm/dm-online-ddl-tool-support.md) + - Manage a Data Migration Task + - [Precheck a Task](/dm/dm-precheck.md) + - [Create a Task](/dm/dm-create-task.md) + - [Query Status](/dm/dm-query-status.md) + - [Pause a Task](/dm/dm-pause-task.md) + - [Resume a Task](/dm/dm-resume-task.md) + - [Stop a Task](/dm/dm-stop-task.md) + - Advanced Tutorials + - Merge and Migrate Data from Sharded Tables + - [Overview](/dm/feature-shard-merge.md) + - [Pessimistic Mode](/dm/feature-shard-merge-pessimistic.md) + - [Optimistic Mode](/dm/feature-shard-merge-optimistic.md) + - [Manually Handle Sharding DDL Lock](/dm/manually-handling-sharding-ddl-locks.md) + - [Migrate from MySQL Databases that Use GH-ost/PT-osc](/dm/feature-online-ddl.md) + - [Migrate Data to a Downstream TiDB Table with More Columns](/migrate-with-more-columns-downstream.md) + - [Continuous Data Validation](/dm/dm-continuous-data-validation.md) + - Maintain + - Cluster Upgrade + - [Maintain DM Clusters Using TiUP (Recommended)](/dm/maintain-dm-using-tiup.md) + - [Manually Upgrade from v1.0.x to v2.0+](/dm/manually-upgrade-dm-1.0-to-2.0.md) + - Tools + - [Manage Using WebUI](/dm/dm-webui-guide.md) + - [Manage Using dmctl](/dm/dmctl-introduction.md) + - Performance Tuning + - 
[Benchmarks](/dm/dm-benchmark-v5.4.0.md) + - [Optimize Configurations](/dm/dm-tune-configuration.md) + - [Test DM Performance](/dm/dm-performance-test.md) + - [Handle Performance Issues](/dm/dm-handle-performance-issues.md) + - Manage Data Sources + - [Switch the MySQL Instance to Be Migrated](/dm/usage-scenario-master-slave-switch.md) + - Manage Tasks + - [Handle Failed DDL Statements](/dm/handle-failed-ddl-statements.md) + - [Manage Schemas of Tables to be Migrated](/dm/dm-manage-schema.md) + - [Export and Import Data Sources and Task Configurations of Clusters](/dm/dm-export-import-config.md) + - [Handle Alerts](/dm/dm-handle-alerts.md) + - [Daily Check](/dm/dm-daily-check.md) + - Reference + - Architecture + - [DM-worker](/dm/dm-worker-intro.md) + - [Relay Log](/dm/relay-log.md) + - [DDL Handling](/dm/dm-ddl-compatible.md) + - Command Line + - [DM-master & DM-worker](/dm/dm-command-line-flags.md) + - Configuration Files + - [Overview](/dm/dm-config-overview.md) + - [Upstream Database Configurations](/dm/dm-source-configuration-file.md) + - [Task Configurations](/dm/task-configuration-file-full.md) + - [DM-master Configuration](/dm/dm-master-configuration-file.md) + - [DM-worker Configuration](/dm/dm-worker-configuration-file.md) + - [Table Selector](/dm/table-selector.md) + - [OpenAPI](/dm/dm-open-api.md) + - [Compatibility Catalog](/dm/dm-compatibility-catalog.md) + - Secure + - [Enable TLS for DM Connections](/dm/dm-enable-tls.md) + - [Generate Self-signed Certificates](/dm/dm-generate-self-signed-certificates.md) + - Monitoring and Alerts + - [Monitoring Metrics](/dm/monitor-a-dm-cluster.md) + - [Alert Rules](/dm/dm-alert-rules.md) + - [Error Codes](/dm/dm-error-handling.md#handle-common-errors) + - [Glossary](/dm/dm-glossary.md) + - Example + - [Migrate Data Using DM](/dm/migrate-data-using-dm.md) + - [Create a Data Migration Task](/dm/quick-start-create-task.md) + - [Best Practices of Data Migration in the Shard Merge 
Scenario](/dm/shard-merge-best-practices.md) + - Troubleshoot + - [FAQ](/dm/dm-faq.md) + - [Handle Errors](/dm/dm-error-handling.md) + - [Release Notes](/dm/dm-release-notes.md) + - TiCDC + - [Overview](/ticdc/ticdc-overview.md) + - [Deploy and Maintain](/ticdc/deploy-ticdc.md) + - Changefeed + - [Overview](/ticdc/ticdc-changefeed-overview.md) + - Create Changefeeds + - [Replicate Data to MySQL-compatible Databases](/ticdc/ticdc-sink-to-mysql.md) + - [Replicate Data to Kafka](/ticdc/ticdc-sink-to-kafka.md) + - [Replicate Data to Storage Services](/ticdc/ticdc-sink-to-cloud-storage.md) + - [Manage Changefeeds](/ticdc/ticdc-manage-changefeed.md) + - [Log Filter](/ticdc/ticdc-filter.md) + - [Bidirectional Replication](/ticdc/ticdc-bidirectional-replication.md) + - Monitor and Alert + - [Monitoring Metrics](/ticdc/monitor-ticdc.md) + - [Alert Rules](/ticdc/ticdc-alert-rules.md) + - Reference + - [Architecture](/ticdc/ticdc-architecture.md) + - [TiCDC Server Configurations](/ticdc/ticdc-server-config.md) + - [TiCDC Changefeed Configurations](/ticdc/ticdc-changefeed-config.md) + - Output Protocols + - [TiCDC Avro Protocol](/ticdc/ticdc-avro-protocol.md) + - [TiCDC Canal-JSON Protocol](/ticdc/ticdc-canal-json.md) + - [TiCDC Open Protocol](/ticdc/ticdc-open-protocol.md) + - [TiCDC CSV Protocol](/ticdc/ticdc-csv.md) + - [TiCDC Open API](/ticdc/ticdc-open-api.md) + - [Guide for Developing a Storage Sink Consumer](/ticdc/ticdc-storage-consumer-dev-guide.md) + - [Compatibility](/ticdc/ticdc-compatibility.md) + - [Troubleshoot](/ticdc/troubleshoot-ticdc.md) + - [FAQs](/ticdc/ticdc-faq.md) + - [Glossary](/ticdc/ticdc-glossary.md) + - TiDB Binlog + - [Overview](/tidb-binlog/tidb-binlog-overview.md) + - [Quick Start](/tidb-binlog/get-started-with-tidb-binlog.md) + - [Deploy](/tidb-binlog/deploy-tidb-binlog.md) + - [Maintain](/tidb-binlog/maintain-tidb-binlog-cluster.md) + - [Configure](/tidb-binlog/tidb-binlog-configuration-file.md) + - 
[Pump](/tidb-binlog/tidb-binlog-configuration-file.md#pump) + - [Drainer](/tidb-binlog/tidb-binlog-configuration-file.md#drainer) + - [Upgrade](/tidb-binlog/upgrade-tidb-binlog.md) + - [Monitor](/tidb-binlog/monitor-tidb-binlog-cluster.md) + - [Reparo](/tidb-binlog/tidb-binlog-reparo.md) + - [binlogctl](/tidb-binlog/binlog-control.md) + - [Binlog Consumer Client](/tidb-binlog/binlog-consumer-client.md) + - [TiDB Binlog Relay Log](/tidb-binlog/tidb-binlog-relay-log.md) + - [Bidirectional Replication Between TiDB Clusters](/tidb-binlog/bidirectional-replication-between-tidb-clusters.md) + - [Glossary](/tidb-binlog/tidb-binlog-glossary.md) + - Troubleshoot + - [Troubleshoot](/tidb-binlog/troubleshoot-tidb-binlog.md) + - [Handle Errors](/tidb-binlog/handle-tidb-binlog-errors.md) + - [FAQ](/tidb-binlog/tidb-binlog-faq.md) + - sync-diff-inspector + - [Overview](/sync-diff-inspector/sync-diff-inspector-overview.md) + - [Data Check for Tables with Different Schema/Table Names](/sync-diff-inspector/route-diff.md) + - [Data Check in the Sharding Scenario](/sync-diff-inspector/shard-diff.md) + - [Data Check for TiDB Upstream/Downstream Clusters](/sync-diff-inspector/upstream-downstream-diff.md) + - [Data Check in the DM Replication Scenario](/sync-diff-inspector/dm-diff.md) + - TiSpark + - [User Guide](/tispark-overview.md) +- Reference + - Cluster Architecture + - [Overview](/tidb-architecture.md) + - [Storage](/tidb-storage.md) + - [Computing](/tidb-computing.md) + - [Scheduling](/tidb-scheduling.md) + - Storage Engine - TiKV + - [TiKV Overview](/tikv-overview.md) + - [RocksDB Overview](/storage-engine/rocksdb-overview.md) + - [Titan Overview](/storage-engine/titan-overview.md) + - [Titan Configuration](/storage-engine/titan-configuration.md) + - Storage Engine - TiFlash + - [Overview](/tiflash/tiflash-overview.md) + - [Create TiFlash Replicas](/tiflash/create-tiflash-replicas.md) + - [Use TiDB to Read TiFlash Replicas](/tiflash/use-tidb-to-read-tiflash.md) + - [Use TiSpark 
to Read TiFlash Replicas](/tiflash/use-tispark-to-read-tiflash.md) + - [Use MPP Mode](/tiflash/use-tiflash-mpp-mode.md) + - [Supported Push-down Calculations](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash Query Result Materialization](/tiflash/tiflash-results-materialization.md) + - [Data Validation](/tiflash/tiflash-data-validation.md) + - [Compatibility](/tiflash/tiflash-compatibility.md) + - [System Variables](/system-variables.md) + - Configuration File Parameters + - [tidb-server](/tidb-configuration-file.md) + - [tikv-server](/tikv-configuration-file.md) + - [tiflash-server](/tiflash/tiflash-configuration.md) + - [pd-server](/pd-configuration-file.md) + - CLI + - [tikv-ctl](/tikv-control.md) + - [pd-ctl](/pd-control.md) + - [tidb-ctl](/tidb-control.md) + - [pd-recover](/pd-recover.md) + - Command Line Flags + - [tidb-server](/command-line-flags-for-tidb-configuration.md) + - [tikv-server](/command-line-flags-for-tikv-configuration.md) + - [tiflash-server](/tiflash/tiflash-command-line-flags.md) + - [pd-server](/command-line-flags-for-pd-configuration.md) + - Key Monitoring Metrics + - [Overview](/grafana-overview-dashboard.md) + - [Performance Overview](/grafana-performance-overview-dashboard.md) + - [TiDB](/grafana-tidb-dashboard.md) + - [PD](/grafana-pd-dashboard.md) + - [TiKV](/grafana-tikv-dashboard.md) + - [TiFlash](/tiflash/monitor-tiflash.md) + - [TiCDC](/ticdc/monitor-ticdc.md) + - Secure + - [Enable TLS Between TiDB Clients and Servers](/enable-tls-between-clients-and-servers.md) + - [Enable TLS Between TiDB Components](/enable-tls-between-components.md) + - [Generate Self-signed Certificates](/generate-self-signed-certificates.md) + - [Encryption at Rest](/encryption-at-rest.md) + - [Enable Encryption for Disk Spill](/enable-disk-spill-encrypt.md) + - [Log Redaction](/log-redaction.md) + - Privileges + - [Security Compatibility with MySQL](/security-compatibility-with-mysql.md) + - [Privilege Management](/privilege-management.md) 
+ - [User Account Management](/user-account-management.md) + - [TiDB Password Management](/password-management.md) + - [Role-Based Access Control](/role-based-access-control.md) + - [Certificate-Based Authentication](/certificate-authentication.md) + - SQL + - SQL Language Structure and Syntax + - Attributes + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [Literal Values](/literal-values.md) + - [Schema Object Names](/schema-object-names.md) + - [Keywords and Reserved Words](/keywords.md) + - [User-Defined Variables](/user-defined-variables.md) + - [Expression Syntax](/expression-syntax.md) + - [Comment Syntax](/comment-syntax.md) + - SQL Statements + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN SHOW DDL [JOBS|JOB QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ADMIN SHOW TELEMETRY`](/sql-statements/sql-statement-admin-show-telemetry.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER PLACEMENT POLICY`](/sql-statements/sql-statement-alter-placement-policy.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) 
+ - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BACKUP`](/sql-statements/sql-statement-backup.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-create-placement-policy.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP PLACEMENT POLICY`](/sql-statements/sql-statement-drop-placement-policy.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP 
SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT <privileges>`](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT <role>`](/sql-statements/sql-statement-grant-role.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOAD STATS`](/sql-statements/sql-statement-load-stats.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` and `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - 
[`RESTORE`](/sql-statements/sql-statement-restore.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-show-create-placement-policy.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - 
[`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLACEMENT`](/sql-statements/sql-statement-show-placement.md) + - [`SHOW PLACEMENT FOR`](/sql-statements/sql-statement-show-placement-for.md) + - [`SHOW PLACEMENT LABELS`](/sql-statements/sql-statement-show-placement-labels.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] 
VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - Data Types + - [Overview](/data-type-overview.md) + - [Default Values](/data-type-default-values.md) + - [Numeric Types](/data-type-numeric.md) + - [Date and Time Types](/data-type-date-and-time.md) + - [String Types](/data-type-string.md) + - [JSON Type](/data-type-json.md) + - Functions and Operators + - [Overview](/functions-and-operators/functions-and-operators-overview.md) + - [Type Conversion in Expression Evaluation](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [Operators](/functions-and-operators/operators.md) + - [Control Flow Functions](/functions-and-operators/control-flow-functions.md) + - [String Functions](/functions-and-operators/string-functions.md) + - [Numeric Functions and Operators](/functions-and-operators/numeric-functions-and-operators.md) + - [Date and Time Functions](/functions-and-operators/date-and-time-functions.md) + - [Bit Functions and Operators](/functions-and-operators/bit-functions-and-operators.md) + - [Cast Functions and Operators](/functions-and-operators/cast-functions-and-operators.md) + - [Encryption and Compression Functions](/functions-and-operators/encryption-and-compression-functions.md) + - [Locking 
Functions](/functions-and-operators/locking-functions.md) + - [Information Functions](/functions-and-operators/information-functions.md) + - [JSON Functions](/functions-and-operators/json-functions.md) + - [Aggregate (GROUP BY) Functions](/functions-and-operators/aggregate-group-by-functions.md) + - [Window Functions](/functions-and-operators/window-functions.md) + - [Miscellaneous Functions](/functions-and-operators/miscellaneous-functions.md) + - [Precision Math](/functions-and-operators/precision-math.md) + - [Set Operations](/functions-and-operators/set-operators.md) + - [List of Expressions for Pushdown](/functions-and-operators/expressions-pushed-down.md) + - [TiDB Specific Functions](/functions-and-operators/tidb-functions.md) + - [Comparisons between Functions and Syntax of Oracle and TiDB](/oracle-functions-to-tidb.md) + - [Clustered Indexes](/clustered-indexes.md) + - [Constraints](/constraints.md) + - [Generated Columns](/generated-columns.md) + - [SQL Mode](/sql-mode.md) + - [Table Attributes](/table-attributes.md) + - Transactions + - [Overview](/transaction-overview.md) + - [Isolation Levels](/transaction-isolation-levels.md) + - [Optimistic Transactions](/optimistic-transaction.md) + - [Pessimistic Transactions](/pessimistic-transaction.md) + - [Non-Transactional DML Statements](/non-transactional-dml.md) + - [Views](/views.md) + - [Partitioning](/partitioned-table.md) + - [Temporary Tables](/temporary-tables.md) + - [Cached Tables](/cached-tables.md) + - Character Set and Collation + - [Overview](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - [Placement Rules in SQL](/placement-rules-in-sql.md) + - System Tables + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - 
[`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - [`CLUSTER_CONFIG`](/information-schema/information-schema-cluster-config.md) + - [`CLUSTER_HARDWARE`](/information-schema/information-schema-cluster-hardware.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`CLUSTER_LOAD`](/information-schema/information-schema-cluster-load.md) + - [`CLUSTER_LOG`](/information-schema/information-schema-cluster-log.md) + - [`CLUSTER_SYSTEMINFO`](/information-schema/information-schema-cluster-systeminfo.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`INSPECTION_RESULT`](/information-schema/information-schema-inspection-result.md) + - [`INSPECTION_RULES`](/information-schema/information-schema-inspection-rules.md) + - [`INSPECTION_SUMMARY`](/information-schema/information-schema-inspection-summary.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`METRICS_SUMMARY`](/information-schema/information-schema-metrics-summary.md) + - [`METRICS_TABLES`](/information-schema/information-schema-metrics-tables.md) + - 
[`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PLACEMENT_POLICIES`](/information-schema/information-schema-placement-policies.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - [`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS`](/information-schema/information-schema-tidb-hot-regions.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - 
[`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - [`VIEWS`](/information-schema/information-schema-views.md) + - [`METRICS_SCHEMA`](/metrics-schema.md) + - [Metadata Lock](/metadata-lock.md) + - UI + - TiDB Dashboard + - [Overview](/dashboard/dashboard-intro.md) + - Maintain + - [Deploy](/dashboard/dashboard-ops-deploy.md) + - [Reverse Proxy](/dashboard/dashboard-ops-reverse-proxy.md) + - [User Management](/dashboard/dashboard-user.md) + - [Secure](/dashboard/dashboard-ops-security.md) + - [Access](/dashboard/dashboard-access.md) + - [Overview Page](/dashboard/dashboard-overview.md) + - [Cluster Info Page](/dashboard/dashboard-cluster-info.md) + - [Top SQL Page](/dashboard/top-sql.md) + - [Key Visualizer Page](/dashboard/dashboard-key-visualizer.md) + - [Metrics Relation Graph](/dashboard/dashboard-metrics-relation.md) + - SQL Statements Analysis + - [SQL Statements Page](/dashboard/dashboard-statement-list.md) + - [SQL Details Page](/dashboard/dashboard-statement-details.md) + - [Slow Queries Page](/dashboard/dashboard-slow-query.md) + - Cluster Diagnostics + - [Access Cluster Diagnostics Page](/dashboard/dashboard-diagnostics-access.md) + - [View Diagnostics Report](/dashboard/dashboard-diagnostics-report.md) + - [Use Diagnostics](/dashboard/dashboard-diagnostics-usage.md) + - [Monitoring Page](/dashboard/dashboard-monitoring.md) + - [Search Logs Page](/dashboard/dashboard-log-search.md) + - Instance Profiling + - [Manual Profiling](/dashboard/dashboard-profiling.md) + - [Continuous Profiling](/dashboard/continuous-profiling.md) + - Session Management and Configuration + - [Share Session](/dashboard/dashboard-session-share.md) + - [Configure SSO](/dashboard/dashboard-session-sso.md) + - [FAQ](/dashboard/dashboard-faq.md) + - [Telemetry](/telemetry.md) + - [Error Codes](/error-codes.md) + - [Table Filter](/table-filter.md) + - [Schedule Replicas by 
Topology Labels](/schedule-replicas-by-topology-labels.md) +- FAQs + - [FAQ Summary](/faq/faq-overview.md) + - [TiDB FAQs](/faq/tidb-faq.md) + - [SQL FAQs](/faq/sql-faq.md) + - [Deployment FAQs](/faq/deploy-and-maintain-faq.md) + - [Migration FAQs](/faq/migration-tidb-faq.md) + - [Upgrade FAQs](/faq/upgrade-faq.md) + - [Monitoring FAQs](/faq/monitor-faq.md) + - [Cluster Management FAQs](/faq/manage-cluster-faq.md) + - [High Availability FAQs](/faq/high-availability-faq.md) + - [High Reliability FAQs](/faq/high-reliability-faq.md) + - [Backup and Restore FAQs](/faq/backup-and-restore-faq.md) +- Release Notes + - [All Releases](/releases/release-notes.md) + - [Release Timeline](/releases/release-timeline.md) + - [TiDB Versioning](/releases/versioning.md) + - [TiDB Installation Packages](/binary-package.md) + - v6.5 + - [6.5.1](/releases/release-6.5.1.md) + - [6.5.0](/releases/release-6.5.0.md) + - v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) + - v6.3 + - [6.3.0-DMR](/releases/release-6.3.0.md) + - v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) + - v6.1 + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - [6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) + - v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) + - v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - [5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - 
[5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - [5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + - [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - [4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - [4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - [4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - [3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - 
[3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - [3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - [3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - [2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 
RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - [2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - [2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0.8](/releases/release-1.0.8.md) + - [1.0.7](/releases/release-1.0.7.md) + - [1.0.6](/releases/release-1.0.6.md) + - [1.0.5](/releases/release-1.0.5.md) + - [1.0.4](/releases/release-1.0.4.md) + - [1.0.3](/releases/release-1.0.3.md) + - [1.0.2](/releases/release-1.0.2.md) + - [1.0.1](/releases/release-1.0.1.md) + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - [RC1](/releases/release-rc.1.md) +- [Glossary](/glossary.md) diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-choose-driver-or-orm.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-choose-driver-or-orm.md new file mode 100644 index 00000000..dd424412 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-choose-driver-or-orm.md @@ -0,0 +1,300 @@ +--- +title: Choose Driver or ORM +summary: Learn how to choose a driver or 
ORM framework to connect to TiDB. +--- + +# Choose Driver or ORM + +> **Note:** +> +> TiDB provides the following two support levels for drivers and ORMs: +> +> - **Full**: indicates that TiDB is compatible with most features of the tool and maintains compatibility with its newer versions. PingCAP will periodically conduct compatibility tests with the latest version of [Third-party tools supported by TiDB](/develop/dev-guide-third-party-support.md). +> - **Compatible**: indicates that because the corresponding third-party tool is adapted to MySQL and TiDB is highly compatible with the MySQL protocol, so TiDB can use most features of the tool. However, PingCAP has not completed a full test on all features of the tool, which might lead to some unexpected behaviors. +> +> For more information, refer to [Third-Party Tools Supported by TiDB](/develop/dev-guide-third-party-support.md). + +TiDB is highly compatible with the MySQL protocol but some features are incompatible with MySQL. For a full list of compatibility differences, see [MySQL Compatibility](/mysql-compatibility.md). + +## Java + +This section describes how to use drivers and ORM frameworks in Java. + +### Java drivers + + +
+ +Support level: **Full** + +You can follow the [MySQL documentation](https://dev.mysql.com/doc/connector-j/8.0/en/) to download and configure a Java JDBC driver. It is recommended to use MySQL Connector/J 8.0.29 or later with TiDB v6.3.0 and newer. + +> **Tip:** +> +> There is a [bug](https://bugs.mysql.com/bug.php?id=106252) in the Connector/J 8.0 versions before 8.0.32, which might cause threads to hang when using TiDB versions earlier than v6.3.0. To avoid this issue, it is recommended that you use either MySQL Connector/J 8.0.32 or a later version, or the TiDB JDBC (see the *TiDB-JDBC* tab). + +For an example of how to build a complete application, see [Build a simple CRUD app with TiDB and JDBC](/develop/dev-guide-sample-application-java-jdbc.md). + +
+
+ +Support level: **Full** + +[TiDB-JDBC](https://github.com/pingcap/mysql-connector-j) is a customized Java driver based on MySQL 8.0.29. Compiled based on MySQL official version 8.0.29, TiDB-JDBC fixes the bug of multi-parameter and multi-field EOF in the prepare mode in the original JDBC, and adds features such as automatic TiCDC snapshot maintenance and the SM3 authentication plugin. + +Using SM3-based authentication is only supported with the TiDB version of MySQL Connector/J. + +If you use Maven, add the following content to the `` section in the `pom.xml` file: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + +``` + +If you need to enable SM3 authentication, add the following content to the `` section in the `pom.xml` file: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + org.bouncycastle + bcprov-jdk15on + 1.67 + + + org.bouncycastle + bcpkix-jdk15on + 1.67 + +``` + +If you use Gradle, add the following content to `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'org.bouncycastle', name: 'bcprov-jdk15on', version: '1.67' +implementation group: 'org.bouncycastle', name: 'bcpkix-jdk15on', version: '1.67' +``` + +
+
+ +### Java ORM frameworks + +> **Note:** +> +> - Currently, Hibernate does [not support nested transactions](https://stackoverflow.com/questions/37927208/nested-transaction-in-spring-app-with-jpa-postgres). +> +> - Since v6.2.0, TiDB supports [savepoint](/sql-statements/sql-statement-savepoint.md). To use the `Propagation.NESTED` transaction propagation option in `@Transactional`, that is, to set `@Transactional(propagation = Propagation.NESTED)`, make sure that your TiDB is v6.2.0 or later. + + +
+ +Support level: **Full** + +To avoid manually managing complex relationships between different dependencies of an application, you can use [Gradle](https://gradle.org/install) or [Maven](https://maven.apache.org/install.html) to get all dependencies of your application, including those indirect ones. Note that only Hibernate `6.0.0.Beta2` or above supports the TiDB dialect. + +If you are using **Maven**, add the following to your ``: + +```xml + + org.hibernate.orm + hibernate-core + 6.0.0.CR2 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +If you are using **Gradle**, add the following to your `dependencies`: + +```gradle +implementation 'org.hibernate:hibernate-core:6.0.0.CR2' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +- For an example of using Hibernate to build a TiDB application by native Java, see [Build a simple CRUD app with TiDB and Hibernate](/develop/dev-guide-sample-application-java-hibernate.md). +- For an example of using Spring Data JPA or Hibernate to build a TiDB application by Spring, see [Build a TiDB app using Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md). + +In addition, you need to specify the TiDB dialect in your [Hibernate configuration file](https://www.tutorialspoint.com/hibernate/hibernate_configuration.htm): `org.hibernate.dialect.TiDBDialect`, which is only supported by Hibernate `6.0.0.Beta2` or above. If your `Hibernate` version is earlier than `6.0.0.Beta2`, upgrade it first. + +> **Note:** +> +> If you are unable to upgrade your `Hibernate` version, use the MySQL 5.7 dialect `org.hibernate.dialect.MySQL57Dialect` instead. However, this setting might cause unpredictable results and the absence of some TiDB-specific features, such as [sequences](/sql-statements/sql-statement-create-sequence.md). + +
+ +
+ +Support level: **Full** + +To avoid manually managing complex relationships between different dependencies of an application, you can use [Gradle](https://gradle.org/install) or [Maven](https://maven.apache.org/install.html) to get all dependencies of your application, including those indirect dependencies. + +If you are using Maven, add the following to your ``: + +```xml + + org.mybatis + mybatis + 3.5.9 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +If you are using Gradle, add the following to your `dependencies`: + +```gradle +implementation 'org.mybatis:mybatis:3.5.9' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +For an example of using MyBatis to build a TiDB application, see [Build a simple CRUD app with TiDB and Mybatis](/develop/dev-guide-sample-application-java-mybatis.md). + +
+ +
+ +### Java client load balancing + +**tidb-loadbalance** + +Support level: **Full** + +[tidb-loadbalance](https://github.com/pingcap/tidb-loadbalance) is a load balancing component on the application side. With tidb-loadbalance, you can automatically maintain the node information of TiDB server and distribute JDBC connections on the client using the tidb-loadbalance policies. Using a direct JDBC connection between the client application and TiDB server has higher performance than using the load balancing component. + +Currently, tidb-loadbalance supports the following policies: roundrobin, random, and weight. + +> **Note:** +> +> tidb-loadbalance must be used with [mysql-connector-j](https://github.com/pingcap/mysql-connector-j). + +If you use Maven, add the following content to the element body of `` in the `pom.xml` file: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + io.github.lastincisor + tidb-loadbalance + 0.0.5 + +``` + +If you use Gradle, add the following content to `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'io.github.lastincisor', name: 'tidb-loadbalance', version: '0.0.5' +``` + +## Golang + +This section describes how to use drivers and ORM frameworks in Golang. + +### Golang drivers + +**go-sql-driver/mysql** + +Support level: **Full** + +To download and configure a Golang driver, refer to the [go-sql-driver/mysql documentation](https://github.com/go-sql-driver/mysql). + +For an example of how to build a complete application, see [Build a simple CRUD app with TiDB and Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md). + +### Golang ORM frameworks + +**GORM** + +Support level: **Full** + +GORM is a popular ORM framework for Golang. To get all dependencies in your application, you can use the `go get` command. 
+ +```shell +go get -u gorm.io/gorm +go get -u gorm.io/driver/mysql +``` + +For an example of using GORM to build a TiDB application, see [Build a simple CRUD app with TiDB and GORM](/develop/dev-guide-sample-application-golang-gorm.md). + +## Python + +This section describes how to use drivers and ORM frameworks in Python. + +### Python drivers + + +
+ +Support level: **Compatible** + +You can follow the [PyMySQL documentation](https://pypi.org/project/PyMySQL/) to download and configure the driver. It is recommended to use PyMySQL 1.0.2 or later versions. + +For an example of using PyMySQL to build a TiDB application, see [Build a simple CRUD app with TiDB and PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md#step-2-get-the-code). + +
+
+ +Support level: **Compatible** + +You can follow the [mysqlclient documentation](https://pypi.org/project/mysqlclient/) to download and configure the driver. It is recommended to use mysqlclient 2.1.1 or later versions. + +For an example of using mysqlclient to build a TiDB application, see [Build a simple CRUD app with TiDB and mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md#step-2-get-the-code). + +
+
+ +Support level: **Compatible** + +You can follow the [MySQL Connector/Python documentation](https://dev.mysql.com/doc/connector-python/en/connector-python-installation-binary.html) to download and configure the driver. It is recommended to use Connector/Python 8.0.31 or later versions. + +For an example of using MySQL Connector/Python to build a TiDB application, see [Build a simple CRUD app with TiDB and MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md#step-2-get-the-code). + +
+
+ +### Python ORM frameworks + + +
+ +Support level: **Compatible** + +[SQLAlchemy](https://www.sqlalchemy.org/) is a popular ORM framework for Python. To get all dependencies in your application, you can use the `pip install SQLAlchemy==1.4.44` command. It is recommended to use SQLAlchemy 1.4.44 or later versions. + +For an example of using SQLAlchemy to build a TiDB application, see [Build a simple CRUD app with TiDB and SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md#step-2-get-the-code). + +
+
+ +Support level: **Compatible** + +[peewee](http://docs.peewee-orm.com/en/latest/) is a popular ORM framework for Python. To get all dependencies in your application, you can use the `pip install peewee==3.15.4` command. It is recommended to use peewee 3.15.4 or later versions. + +For an example of using peewee to build a TiDB application, see [Build a simple CRUD app with TiDB and peewee](/develop/dev-guide-sample-application-python-peewee.md#step-2-get-the-code). + +
+
+ + + +After you have determined the driver or ORM, you can [connect to your TiDB cluster](https://docs.pingcap.com/tidbcloud/connect-to-tidb-cluster). + + diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-insert-data.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-insert-data.md new file mode 100644 index 00000000..c75bdc44 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-insert-data.md @@ -0,0 +1,304 @@ +--- +title: Insert Data +summary: Learn about how to insert data. +--- + + + +# Insert Data + +This document describes how to insert data into TiDB by using the SQL language with different programming languages. + +## Before you start + +Before reading this document, you need to prepare the following: + +- [Build a TiDB Cluster in TiDB Cloud (Serverless Tier)](/develop/dev-guide-build-cluster-in-cloud.md). +- Read [Schema Design Overview](/develop/dev-guide-schema-design-overview.md), [Create a Database](/develop/dev-guide-create-database.md), [Create a Table](/develop/dev-guide-create-table.md), and [Create Secondary Indexes](/develop/dev-guide-create-secondary-indexes.md) + +## Insert rows + +There are two ways to insert multiple rows of data. For example, if you need to insert **3** players' data. + +- A **multi-line insertion statement**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2), (3, 300, 5); + ``` + +- Multiple **single-line insertion statements**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (2, 230, 2); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (3, 300, 5); + ``` + +Generally the `multi-line insertion statement` runs faster than the multiple `single-line insertion statements`. + + +
+ +```sql +CREATE TABLE `player` (`id` INT, `coins` INT, `goods` INT); +INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2); +``` + +For more information on how to use this SQL, see [Connecting to a TiDB Cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-2-connect-to-a-cluster) and follow the steps to enter the SQL statement after connecting to a TiDB cluster using a client. + +
+ +
+ +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + connection.setAutoCommit(false); + + PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) + + // first player + pstmt.setInt(1, 1); + pstmt.setInt(2, 1000); + pstmt.setInt(3, 1); + pstmt.addBatch(); + + // second player + pstmt.setInt(1, 2); + pstmt.setInt(2, 230); + pstmt.setInt(3, 2); + pstmt.addBatch(); + + pstmt.executeBatch(); + connection.commit(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +Due to the default MySQL JDBC Driver settings, you need to change some parameters to get better bulk insert performance. + +| Parameter | Means | Recommended Scenario | Recommended Configuration| +| :------------------------: | :-----------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------: | +| `useServerPrepStmts` | Whether to use the server side to enable prepared statements | When you need to use a prepared statement more than once | `true` | +| `cachePrepStmts` | Whether the client caches prepared statements | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | Maximum size of a prepared statement (256 characters by default) | When the prepared statement is greater than 256 characters | Configured according to the actual size of the prepared statement | +| `prepStmtCacheSize` | Maximum number of prepared statement caches (25 by default) | When the number of prepared statements is greater than 25 | Configured according to the actual number of prepared statements | +| `rewriteBatchedStatements` | Whether to rewrite **Batched** statements | When batch operations are required | `true` | +| `allowMultiQueries` | Start batch operations | Because a [client bug](https://bugs.mysql.com/bug.php?id=96623) requires this 
to be set when `rewriteBatchedStatements = true` and `useServerPrepStmts = true` | `true` | + +MySQL JDBC Driver also provides an integrated configuration: `useConfigs`. When it is configured with `maxPerformance`, it is equivalent to configuring a set of configurations. Taking `mysql:mysql-connector-java:8.0.28` as an example, `useConfigs=maxPerformance` contains: + +```properties +cachePrepStmts=true +cacheCallableStmts=true +cacheServerConfiguration=true +useLocalSessionState=true +elideSetAutoCommits=true +alwaysSendSetIsolation=false +enableQueryTimeouts=false +connectionAttributes=none +useInformationSchema=true +``` + +You can check `mysql-connector-java-{version}.jar!/com/mysql/cj/configurations/maxPerformance.properties` to get the configurations contained in `useConfigs=maxPerformance` for the corresponding version of MySQL JDBC Driver. + +The following is a typical scenario of JDBC connection string configurations. In this example, Host: `127.0.0.1`, Port: `4000`, User name: `root`, Password: null, Default database: `test`: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +For complete examples in Java, see: + +- [Build a simple CRUD app with TiDB and JDBC](/develop/dev-guide-sample-application-java-jdbc.md#step-2-get-the-code) +- [Build a simple CRUD app with TiDB and Hibernate](/develop/dev-guide-sample-application-java-hibernate.md#step-2-get-the-code) +- [Build the TiDB app using Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
+ +```go +package main + +import ( + "database/sql" + "strings" + + _ "github.com/go-sql-driver/mysql" +) + +type Player struct { + ID string + Coins int + Goods int +} + +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func playerToArgs(players []Player) []interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + strings.Repeat(",(?,?,?)", amount-1) +} +``` + +For complete examples in Golang, see: + +- [Use Go-MySQL-Driver to build a simple CRUD app with TiDB and Golang](/develop/dev-guide-sample-application-golang-sql-driver.md#step-2-get-the-code) +- [Use GORM to build a simple CRUD app with TiDB and Golang](/develop/dev-guide-sample-application-golang-gorm.md#step-2-get-the-code) + +
+ +
+ +```python +import MySQLdb +connection = MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="bookshop", + autocommit=True +) + +with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + cur.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player_list[idx:idx + 114]) +``` + +For complete examples in Python, see: + +- [Use PyMySQL to build a simple CRUD app with TiDB and Python](/develop/dev-guide-sample-application-python-pymysql.md#step-2-get-the-code) +- [Use mysqlclient to build a simple CRUD app with TiDB and Python](/develop/dev-guide-sample-application-python-mysqlclient.md#step-2-get-the-code) +- [Use MySQL Connector/Python to build a simple CRUD app with TiDB and Python](/develop/dev-guide-sample-application-python-mysql-connector.md#step-2-get-the-code) +- [Use SQLAlchemy to build a simple CRUD app with TiDB and Python](/develop/dev-guide-sample-application-python-sqlalchemy.md#step-2-get-the-code) +- [Use peewee to build a simple CRUD app with TiDB and Python](/develop/dev-guide-sample-application-python-peewee.md#step-2-get-the-code) + +
+ +
+ +## Bulk-Insert + +If you need to quickly import a large amount of data into a TiDB cluster, it is recommended that you use a range of tools provided by **PingCAP** for data migration. Using the `INSERT` statement is not the best way, because it is not efficient and requires to handle exceptions and other issues on your own. + +The following are the recommended tools for bulk-insert: + +- Data export: [Dumpling](/dumpling-overview.md). You can export MySQL or TiDB data to local or Amazon S3. + + + +- Data import: [TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md). You can import **Dumpling** exported data, a **CSV** file, or [Migrate Data from Amazon Aurora to TiDB](/migrate-aurora-to-tidb.md). It also supports reading data from a local disk or Amazon S3 cloud disk. +- Data replication: [TiDB Data Migration](/dm/dm-overview.md). You can replicate MySQL, MariaDB, and Amazon Aurora databases to TiDB. It also supports merging and migrating the sharded instances and tables from the source databases. +- Data backup and restore: [Backup & Restore (BR)](/br/backup-and-restore-overview.md). Compared to **Dumpling**, **BR** is more suitable for **_big data_** scenario. + + + + + +- Data import: [Data Import Task](/tidb-cloud/import-sample-data.md) page in the TiDB Cloud console. You can import **Dumpling** exported data, a **CSV** file, or [Migrate Data from Amazon Aurora to TiDB](/tidb-cloud/migrate-from-aurora-bulk-import.md). It also supports reading data from a local disk, Amazon S3 cloud disk, or GCS cloud disk. +- Data replication: [TiDB Data Migration](https://docs.pingcap.com/tidb/stable/dm-overview). You can replicate MySQL, MariaDB, and Amazon Aurora databases to TiDB. It also supports merging and migrating the sharded instances and tables from the source databases. +- Data backup and restore: [Backup](/tidb-cloud/backup-and-restore.md) page in the TiDB Cloud console. 
Compared to **Dumpling**, backup and restore is more suitable for **_big data_** scenario. + + + +## Avoid hotspots + +When designing a table, you need to consider if there is a large number of insert operations. If so, you need to avoid hotspots during table design. See the [Select primary key](/develop/dev-guide-create-table.md#select-primary-key) section and follow the [Rules when selecting primary key](/develop/dev-guide-create-table.md#guidelines-to-follow-when-selecting-primary-key). + + + +For more information on how to handle hotspot issues, see [Troubleshoot Hotspot Issues](/troubleshoot-hot-spot-issues.md). + + + +## Insert data to a table with the `AUTO_RANDOM` primary key + +If the primary key of the table you insert has the `AUTO_RANDOM` attribute, then by default the primary key cannot be specified. For example, in the [`bookshop`](/develop/dev-guide-bookshop-schema-design.md) database, you can see that the `id` field of the [`users` table](/develop/dev-guide-bookshop-schema-design.md#users-table) contains the `AUTO_RANDOM` attribute. + +In this case, you **cannot** use SQL like the following to insert: + +```sql +INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); +``` + +An error will occur: + +``` +ERROR 8216 (HY000): Invalid auto random: Explicit insertion on auto_random column is disabled. Try to set @@allow_auto_random_explicit_insert = true. +``` + +It is not recommended to manually specify the `AUTO_RANDOM` column during insertion time. + +There are two solutions to handle this error: + +- (Recommended) Remove this column from the insert statement and use the `AUTO_RANDOM` value that TiDB initialized for you. This fits the semantics of `AUTO_RANDOM`. 
+ + {{< copyable "sql" >}} + + ```sql + INSERT INTO `bookshop`.`users` (`balance`, `nickname`) VALUES (0.00, 'nicky'); + ``` + +- If you are sure that you **_must_** specify this column, then you can use the [`SET` statement](https://docs.pingcap.com/zh/tidb/stable/sql-statement-set-variable) to allow the column of `AUTO_RANDOM` to be specified during insertion time by changing the user variable. + + {{< copyable "sql" >}} + + ```sql + SET @@allow_auto_random_explicit_insert = true; + INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); + ``` + +## Use HTAP + +In TiDB, HTAP capabilities save you from performing additional operations when inserting data. There is no additional insertion logic. TiDB automatically guarantees data consistency. All you need to do is [turn on column-oriented replica synchronization](/develop/dev-guide-create-table.md#use-htap-capabilities) after creating the table, and use the column-oriented replica to speed up your queries directly. diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-overview.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-overview.md new file mode 100644 index 00000000..53bc52bb --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-overview.md @@ -0,0 +1,85 @@ +--- +title: Developer Guide Overview +summary: Introduce the overview of the developer guide. +--- + +# Developer Guide Overview + +This guide is written for application developers, but if you are interested in the inner workings of TiDB or want to get involved in TiDB development, read the [TiDB Kernel Development Guide](https://pingcap.github.io/tidb-dev-guide/) for more information about TiDB. + + + +This tutorial shows how to quickly build an application using TiDB, the possible use cases of TiDB and how to handle common problems. 
+ +Before reading this page, it is recommended that you read the [Quick Start Guide for the TiDB Database Platform](/quick-start-with-tidb.md). + + + + + +This tutorial shows how to quickly build an application using TiDB Cloud, the possible use cases of TiDB Cloud and how to handle common problems. + + + +## TiDB basics + +Before you start working with TiDB, you need to understand some important mechanisms of how TiDB works: + +- Read the [TiDB Transaction Overview](/transaction-overview.md) to understand how transactions work in TiDB, or check out the [Transaction Notes for Application Developers](/develop/dev-guide-transaction-overview.md) to learn about transaction knowledge required for application development. +- Understand [the way applications interact with TiDB](#the-way-applications-interact-with-tidb). +- To learn core components and concepts of building up the distributed database TiDB and TiDB Cloud, refer to the free online course [Introduction to TiDB](https://eng.edu.pingcap.com/catalog/info/id:203/?utm_source=docs-dev-guide). + +## TiDB transaction mechanisms + +TiDB supports distributed transactions and offers both [optimistic transaction](/optimistic-transaction.md) and [pessimistic transaction](/pessimistic-transaction.md) modes. The current version of TiDB uses the **pessimistic transaction** mode by default, which allows you to transact with TiDB as you would with a traditional monolithic database (for example, MySQL). + +You can start a transaction using [`BEGIN`](/sql-statements/sql-statement-begin.md), explicitly specify a **pessimistic transaction** using `BEGIN PESSIMISTIC`, or explicitly specify an **optimistic transaction** using `BEGIN OPTIMISTIC`. After that, you can either commit ([`COMMIT`](/sql-statements/sql-statement-commit.md)) or roll back ([`ROLLBACK`](/sql-statements/sql-statement-rollback.md)) the transaction. 
+ +TiDB guarantees atomicity for all statements between the start of `BEGIN` and the end of `COMMIT` or `ROLLBACK`, that is, all statements that are executed during this period either succeed or fail as a whole. This is used to ensure data consistency you need for application development. + + + +If you are not sure what an **optimistic transaction** is, do ***NOT*** use it yet. Because **optimistic transactions** require that the application can correctly handle [all errors](/error-codes.md) returned by the `COMMIT` statement. If you are not sure how your application handles them, use a **pessimistic transaction** instead. + + + + + +If you are not sure what an **optimistic transaction** is, do ***NOT*** use it yet. Because **optimistic transactions** require that the application can correctly handle [all errors](https://docs.pingcap.com/tidb/stable/error-codes) returned by the `COMMIT` statement. If you are not sure how your application handles them, use a **pessimistic transaction** instead. + + + +## The way applications interact with TiDB + +TiDB is highly compatible with the MySQL protocol and supports [most MySQL syntax and features](/mysql-compatibility.md), so most MySQL connection libraries are compatible with TiDB. If your application framework or language does not have an official adaptation from PingCAP, it is recommended that you use MySQL's client libraries. More and more third-party libraries are actively supporting TiDB's different features. + +Since TiDB is compatible with the MySQL protocol and MySQL syntax, most of the ORMs that support MySQL are also compatible with TiDB. 
+ +## Read more + + + +- [Quick Start](/develop/dev-guide-build-cluster-in-cloud.md) +- [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) +- [Connect to TiDB](/develop/dev-guide-connect-to-tidb.md) +- [Database Schema Design](/develop/dev-guide-schema-design-overview.md) +- [Write Data](/develop/dev-guide-insert-data.md) +- [Read Data](/develop/dev-guide-get-data-from-single-table.md) +- [Transaction](/develop/dev-guide-transaction-overview.md) +- [Optimize](/develop/dev-guide-optimize-sql-overview.md) +- [Example Applications](/develop/dev-guide-sample-application-java-spring-boot.md) + + + + + +- [Quick Start](/develop/dev-guide-build-cluster-in-cloud.md) +- [Choose Driver or ORM](/develop/dev-guide-choose-driver-or-orm.md) +- [Database Schema Design](/develop/dev-guide-schema-design-overview.md) +- [Write Data](/develop/dev-guide-insert-data.md) +- [Read Data](/develop/dev-guide-get-data-from-single-table.md) +- [Transaction](/develop/dev-guide-transaction-overview.md) +- [Optimize](/develop/dev-guide-optimize-sql-overview.md) +- [Example Applications](/develop/dev-guide-sample-application-java-spring-boot.md) + + diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-playground-gitpod.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-playground-gitpod.md new file mode 100644 index 00000000..fae77fb1 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-playground-gitpod.md @@ -0,0 +1,169 @@ +--- +title: Gitpod +--- + + + +# Gitpod + +With [Gitpod](https://www.gitpod.io/), you can get a full development environment in your browser with the click of a button or link, and you can write code right away. + +Gitpod is an open-source Kubernetes application (GitHub repository address: ) for direct-to-code development environments, which spins up fresh, automated development environments for each task, in the cloud, in seconds. 
It enables you to describe your development environment as code and start instant, remote and cloud-based development environments directly from your browser or your Desktop IDE. + +## Quick start + +1. Fork the example code repository [pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java) for TiDB application development. + +2. Start your Gitpod workspace by prefixing the URL of the sample code repository with `https://gitpod.io/#` in the address bar of your browser. + + - For example, `https://gitpod.io/#https://github.com/pingcap-inc/tidb-example-java`. + + - You can configure environment variables in the URL. For example, `https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java`. + +3. Log in and start the workspace using one of the providers listed. For example, `Github`. + +## Use the default Gitpod configuration and environment + +After completing the [quick-start](#quick-start) steps, it will take a while for Gitpod to set up your workspace. + +Take the [Spring Boot Web](/develop/dev-guide-sample-application-java-spring-boot.md) application as an example. You can create a new workspace by the `https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java` URL. + +After that, you will see a page similar to the following: + +![playground gitpod workspace init](/media/develop/playground-gitpod-workspace-init.png) + +This scenario in the page uses [TiUP](https://docs.pingcap.com/tidb/stable/tiup-overview) to build a TiDB Playground. You can check the progress on the left side of the terminal area. + +Once the TiDB Playground is ready, another `Spring JPA Hibernate` task will run. You can check the progress on the right side of the terminal area. + +After all these tasks are finished, you will see a page similar to the following.
On this page, check the `REMOTE EXPLORER` area in the left navigation pane (Gitpod supports URL-based port forwarding) and find the URL of your port `8080`. + +![playground gitpod workspace ready](/media/develop/playground-gitpod-workspace-ready.png) + +You can test the API by [sending an HTTP request](/develop/dev-guide-sample-application-java-spring-boot.md#step-6-http-requests). Make sure to replace the `http://localhost:8080` URL with the one you found in the `REMOTE EXPLORER` area. + +## Using custom Gitpod configuration and Docker image + +### Customize Gitpod configurations + +Referring to [example.gitpod.yml](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.yml), create a `.gitpod.yml` file in the root directory of your project to configure the Gitpod workspace. + +```yml +# This configuration file was automatically generated by Gitpod. +# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. + +# image: +# file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready!
***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### Customize Gitpod Docker images + +By default, Gitpod uses a standard Docker image named Workspace-Full as the basis for the workspace. Workspaces launched from this default image are pre-installed with Docker, Go, Java, Node.js, C/C++, Python, Ruby, Rust, PHP, and tools such as Homebrew, Tailscale, and Nginx. + +You can use a public Docker image or a Dockerfile and also install any required dependencies for your project. + +For example, you can use a Dockerfile (see also [Example `.gitpod.Dockerfile`](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.Dockerfile)) as follows: + +```dockerfile +FROM gitpod/workspace-java-17 + +RUN sudo apt install mysql-client -y +RUN curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh +``` + +Then, you need to update `.gitpod.yml`: + +```yml +# This configuration file was automatically generated by Gitpod. +# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. + +image: + # Import your Dockerfile here. + file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! 
***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### Apply changes + +After completing the configuration of the `.gitpod.yml` file, make sure that the latest code is available in your corresponding GitHub repository. + +Visit `https://gitpod.io/#` to create a new Gitpod workspace with the latest code applied. + +Visit `https://gitpod.io/workspaces` for all established workspaces. + +## Summary + +Gitpod provides a complete, automated, and pre-configured cloud-native development environment. You can develop, run, and test code directly in the browser without any local configurations. + +![playground gitpod summary](/media/develop/playground-gitpod-summary.png) diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-prepared-statement.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-prepared-statement.md new file mode 100644 index 00000000..30dca748 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-prepared-statement.md @@ -0,0 +1,226 @@ +--- +title: Prepared Statements +summary: Learn about how to use the TiDB prepared statements. +--- + +# Prepared Statements + +A [prepared statement](/sql-statements/sql-statement-prepare.md) templatizes multiple SQL statements in which only parameters are different. It separates the SQL statements from the parameters. 
You can use it to improve the following aspects of SQL statements: + +- **Security**: Because parameters and statements are separated, the risk of [SQL injection](https://en.wikipedia.org/wiki/SQL_injection) attacks is avoided. +- **Performance**: Because the statement is parsed in advance on the TiDB server, only parameters are passed for subsequent executions, saving the cost of parsing the entire SQL statements, splicing SQL statement strings, and network transmission. + +In most applications, SQL statements can be enumerated. You can use a limited number of SQL statements to complete data queries for the entire application. So using a prepared statement is a best practice. + +## SQL syntax + +This section describes the SQL syntax for creating, running and deleting a prepared statement. + +### Create a prepared statement + +```sql +PREPARE {prepared_statement_name} FROM '{prepared_statement_sql}'; +``` + +| Parameter Name | Description | +| :-------------------------: | :------------------------------------: | +| `{prepared_statement_name}` | name of the prepared statement| +| `{prepared_statement_sql}` | the prepared statement SQL with a question mark as a placeholder | + +See [PREPARE statement](/sql-statements/sql-statement-prepare.md) for more information. + +### Use the prepared statement + +A prepared statement can only use **user variables** as parameters, so use the [`SET` statement](/sql-statements/sql-statement-set-variable.md) to set the variables before the [`EXECUTE` statement](/sql-statements/sql-statement-execute.md) can call the prepared statement. 
+ +```sql +SET @{parameter_name} = {parameter_value}; +EXECUTE {prepared_statement_name} USING @{parameter_name}; +``` + +| Parameter Name | Description | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{parameter_name}` | user variable name | +| `{parameter_value}` | user variable value | +| `{prepared_statement_name}` | The name of the preprocessing statement, which must be the same as the name defined in the [Create a prepared statement](#create-a-prepared-statement) | + +See the [`EXECUTE` statement](/sql-statements/sql-statement-execute.md) for more information. + +### Delete the prepared statement + +```sql +DEALLOCATE PREPARE {prepared_statement_name}; +``` + +| Parameter Name | Description | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{prepared_statement_name}` | The name of the preprocessing statement, which must be the same as the name defined in the [Create a prepared statement](#create-a-prepared-statement) | + +See the [`DEALLOCATE` statement](/sql-statements/sql-statement-deallocate.md) for more information. + +## Examples + +This section describes two examples of prepared statements: `SELECT` data and `INSERT` data. + +### `SELECT` example + +For example, you need to query a book with `id = 1` in the [`bookshop` application](/develop/dev-guide-bookshop-schema-design.md#books-table). + + + +
+ +```sql +PREPARE `books_query` FROM 'SELECT * FROM `books` WHERE `id` = ?'; +``` + +Running result: + +``` +Query OK, 0 rows affected (0.01 sec) +``` + +```sql +SET @id = 1; +``` + +Running result: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_query` USING @id; +``` + +Running result: + +``` ++---------+---------------------------------+--------+---------------------+-------+--------+ +| id | title | type | published_at | stock | price | ++---------+---------------------------------+--------+---------------------+-------+--------+ +| 1 | The Adventures of Pierce Wehner | Comics | 1904-06-06 20:46:25 | 586 | 411.66 | ++---------+---------------------------------+--------+---------------------+-------+--------+ +1 row in set (0.05 sec) +``` + +
+ +
+ +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM `books` WHERE `id` = ?"); + preparedStatement.setLong(1, 1); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.println("No books in the table with id 1"); + } else { + // got book's info, which id is 1 + System.out.println(res.getLong("id")); + System.out.println(res.getString("title")); + System.out.println(res.getString("type")); + } +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +
+ +
+ +### `INSERT` example + +Using the [`books` table](/develop/dev-guide-bookshop-schema-design.md#books-table) as an example, you need to insert a book with `title = TiDB Developer Guide`, `type = Science & Technology`, `stock = 100`, `price = 0.0`, and `published_at = NOW()` (current time of insertion). Note that you don't need to specify the `AUTO_RANDOM` attribute in the **primary key** of the `books` table. For more information about inserting data, see [Insert Data](/develop/dev-guide-insert-data.md). + + + +
+ +```sql +PREPARE `books_insert` FROM 'INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);'; +``` + +Running result: + +``` +Query OK, 0 rows affected (0.03 sec) +``` + +```sql +SET @title = 'TiDB Developer Guide'; +SET @type = 'Science & Technology'; +SET @stock = 100; +SET @price = 0.0; +SET @published_at = NOW(); +``` + +Running result: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_insert` USING @title, @type, @stock, @price, @published_at; +``` + +Running result: + +``` +Query OK, 1 row affected (0.03 sec) +``` + +
+ +
+ +```java +try (Connection connection = ds.getConnection()) { + String sql = "INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);"; + PreparedStatement preparedStatement = connection.prepareStatement(sql); + + preparedStatement.setString(1, "TiDB Developer Guide"); + preparedStatement.setString(2, "Science & Technology"); + preparedStatement.setInt(3, 100); + preparedStatement.setBigDecimal(4, new BigDecimal("0.0")); + preparedStatement.setTimestamp(5, new Timestamp(Calendar.getInstance().getTimeInMillis())); + + preparedStatement.executeUpdate(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +As you can see, JDBC helps you control the life cycle of prepared statements and you don't need to manually create, use, or delete prepared statements in your application. However, note that because TiDB is compatible with MySQL, the default configuration for using MySQL JDBC Driver on the client-side is not to enable the **_server-side_** prepared statement option, but to use the client-side prepared statement. 
+ +The following configurations help you use the TiDB server-side prepared statements under JDBC: + +| Parameter | Means | Recommended Scenario | Recommended Configuration| +| :------------------------: | :-----------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------: | +| `useServerPrepStmts` | Whether to use the server side to enable prepared statements | When you need to use a prepared statement more than once | `true` | +| `cachePrepStmts` | Whether the client caches prepared statements | When `useServerPrepStmts=true` | `true` | +| `prepStmtCacheSqlLimit` | Maximum size of a prepared statement (256 characters by default) | When the prepared statement is greater than 256 characters | Configured according to the actual size of the prepared statement | +| `prepStmtCacheSize` | Maximum number of prepared statements (25 by default) | When the number of prepared statements is greater than 25 | Configured according to the actual number of prepared statements | + +The following is a typical scenario of JDBC connection string configurations. Host: `127.0.0.1`, Port: `4000`, User name: `root`, Password: null, Default database: `test`: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +You can also see the [insert rows](/develop/dev-guide-insert-data.md#insert-rows) chapter if you need to change other JDBC parameters when inserting data.
+ +For a complete example in Java, see: + +- [Build a simple CRUD app with TiDB and JDBC](/develop/dev-guide-sample-application-java-jdbc.md#step-2-get-the-code) +- [Build a simple CRUD app with TiDB and Hibernate](/develop/dev-guide-sample-application-java-hibernate.md#step-2-get-the-code) +- [Build the TiDB app using Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-gorm.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-gorm.md new file mode 100644 index 00000000..ae815765 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-gorm.md @@ -0,0 +1,326 @@ +--- +title: Build a Simple CRUD App with TiDB and GORM +summary: Learn how to build a simple CRUD application with TiDB and GORM. +--- + + + + +# Build a Simple CRUD App with TiDB and GORM + +[GORM](https://gorm.io/) is a popular open-source ORM library for Golang. + +This document describes how to use TiDB and GORM to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Golang 1.16 or a later version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB Cluster Using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +Compared with GORM, the go-sql-driver/mysql implementation might be not a best practice, because you need to write error handling logic, close `*sql.Rows` manually and cannot reuse code easily, which makes your code slightly redundant. + +The following instructions take `v1.23.5` as an example. 
+ +To adapt TiDB transactions, write a toolkit [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util) according to the following code: + +```go +package util + +import ( + "context" + "database/sql" +) + +type TiDBSqlTx struct { + *sql.Tx + conn *sql.Conn + pessimistic bool +} + +func TiDBSqlBegin(db *sql.DB, pessimistic bool) (*TiDBSqlTx, error) { + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return nil, err + } + if pessimistic { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "pessimistic") + } else { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "optimistic") + } + if err != nil { + return nil, err + } + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + return &TiDBSqlTx{ + conn: conn, + Tx: tx, + pessimistic: pessimistic, + }, nil +} + +func (tx *TiDBSqlTx) Commit() error { + defer tx.conn.Close() + return tx.Tx.Commit() +} + +func (tx *TiDBSqlTx) Rollback() error { + defer tx.conn.Close() + return tx.Tx.Rollback() +} +``` + +Change to the `gorm` directory: + +```shell +cd gorm +``` + +The structure of this directory is as follows: + +``` +. +├── Makefile +├── go.mod +├── go.sum +└── gorm.go +``` + +`gorm.go` is the main body of the `gorm`. Compared with go-sql-driver/mysql, GORM avoids differences in database creation between different databases. It also implements a lot of operations, such as AutoMigrate and CRUD of objects, which greatly simplifies the code. + +`Player` is a data entity struct that is a mapping for tables. Each property of a `Player` corresponds to a field in the `player` table. Compared with go-sql-driver/mysql, `Player` in GORM adds struct tags to indicate mapping relationships for more information, such as `gorm:"primaryKey;type:VARCHAR(36);column:id"`. 
+ +```go + +package main + +import ( + "fmt" + "math/rand" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" + + "gorm.io/driver/mysql" + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/logger" +) + +type Player struct { + ID string `gorm:"primaryKey;type:VARCHAR(36);column:id"` + Coins int `gorm:"column:coins"` + Goods int `gorm:"column:goods"` +} + +func (*Player) TableName() string { + return "player" +} + +func main() { + // 1. Configure the example database connection. + db := createDB() + + // AutoMigrate for player table + db.AutoMigrate(&Player{}) + + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) +} + +func tradeExample(db *gorm.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + player1 := &Player{ID: "1", Coins: 100} + player2 := &Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player1) + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player2) + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. + fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func simpleExample(db *gorm.DB) { + // Create a player, who has a coin and a goods. + if err := db.Clauses(clause.OnConflict{UpdateAll: true}). + Create(&Player{ID: "test", Coins: 1, Goods: 1}).Error; err != nil { + panic(err) + } + + // Get a player. 
+ var testPlayer Player + db.Find(&testPlayer, "id = ?", "test") + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + bulkInsertPlayers := make([]Player, 1919, 1919) + total, batch := 1919, 114 + for i := 0; i < total; i++ { + bulkInsertPlayers[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + if err := db.Session(&gorm.Session{Logger: db.Logger.LogMode(logger.Error)}). + CreateInBatches(bulkInsertPlayers, batch).Error; err != nil { + panic(err) + } + + // Count players amount. + playersCount := int64(0) + db.Model(&Player{}).Count(&playersCount) + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. + threePlayers := make([]Player, 3, 3) + db.Limit(3).Find(&threePlayers) + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func createDB() *gorm.DB { + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + panic(err) + } + + return db +} + +func buyGoods(db *gorm.DB, sellID, buyID string, amount, price int) error { + return util.TiDBGormBegin(db, true, func(tx *gorm.DB) error { + var sellPlayer, buyPlayer Player + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). + Find(&sellPlayer, "id = ?", sellID).Error; err != nil { + return err + } + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). + Find(&buyPlayer, "id = ?", buyID).Error; err != nil { + return err + } + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateSQL := "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" 
+ if err := tx.Exec(updateSQL, -amount, price, sellID).Error; err != nil { + return err + } + + if err := tx.Exec(updateSQL, amount, -price, buyID).Error; err != nil { + return err + } + + fmt.Println("\n[buyGoods]:\n 'trade success'") + return nil + }) +} +``` + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, modify the value of the `dsn` in `gorm.go`: + +```go +dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `mysql.RegisterTLSConfig` and `dsn` as follows: + +```go +mysql.RegisterTLSConfig("register-tidb-tls", &tls.Config { + MinVersion: tls.VersionTLS12, + ServerName: "xxx.tidbcloud.com", +}) + +dsn := "2aEp24QWEDLqRFs.root:123456@tcp(xxx.tidbcloud.com:4000)/test?charset=utf8mb4&tls=register-tidb-tls" +``` + +### Step 3.2 Run the code + +To run the code, you can run `make build` and `make run` respectively: + +```shell +make build # this command executes `go build -o bin/gorm-example` +make run # this command executes `./bin/gorm-example` +``` + +Or you can use the native commands: + +```shell +go build -o bin/gorm-example +./bin/gorm-example +``` + +Or run the `make` command directly, which is a combination of `make build` and `make run`. + +## Step 4. 
Expected output + +[GORM Expected Output](https://github.com/pingcap-inc/tidb-example-golang/blob/main/Expected-Output.md#gorm) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-sql-driver.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-sql-driver.md new file mode 100644 index 00000000..6420b2c7 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-golang-sql-driver.md @@ -0,0 +1,557 @@ +--- +title: Build a Simple CRUD App with TiDB and Go-MySQL-Driver +summary: Learn how to build a simple CRUD application with TiDB and Go-MySQL-Driver. +aliases: ['/tidbcloud/dev-guide-sample-application-golang','/tidb/stable/dev-guide-sample-application-golang','/tidb/v6.5/dev-guide-sample-application-golang'] +--- + + + + +# Build a Simple CRUD App with TiDB and Go-MySQL-Driver + +This document describes how to use TiDB and [Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Golang 1.16 or a later version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB Cluster Using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. 
Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +Change to the `sqldriver` directory: + +```shell +cd sqldriver +``` + +The structure of this directory is as follows: + +``` +. +├── Makefile +├── dao.go +├── go.mod +├── go.sum +├── sql +│   └── dbinit.sql +├── sql.go +└── sqldriver.go +``` + +You can find initialization statements for the table creation in `dbinit.sql`: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`sqldriver.go` is the main body of the `sqldriver`. TiDB is highly compatible with the MySQL protocol, so you need to initialize a MySQL source instance `db, err := sql.Open("mysql", dsn)` to connect to TiDB. Then, you can use `dao.go` to read, edit, add, and delete data. + +```go +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/go-sql-driver/mysql" +) + +func main() { + // 1. Configure the example database connection. + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + openDB("mysql", dsn, func(db *sql.DB) { + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) + }) +} + +func simpleExample(db *sql.DB) { + // Create a player, who has a coin and a goods. + err := createPlayer(db, Player{ID: "test", Coins: 1, Goods: 1}) + if err != nil { + panic(err) + } + + // Get a player. + testPlayer, err := getPlayer(db, "test") + if err != nil { + panic(err) + } + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + + err = bulkInsertPlayers(db, randomPlayers(1919), 114) + if err != nil { + panic(err) + } + + // Count players amount. + playersCount, err := getCount(db) + if err != nil { + panic(err) + } + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. 
+ threePlayers, err := getPlayerByLimit(db, 3) + if err != nil { + panic(err) + } + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func tradeExample(db *sql.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + player1 := Player{ID: "1", Coins: 100} + player2 := Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + if err := createPlayer(db, player1); err != nil { + panic(err) + } + if err := createPlayer(db, player2); err != nil { + panic(err) + } + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. + fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func openDB(driverName, dataSourceName string, runnable func(db *sql.DB)) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + defer db.Close() + + runnable(db) +} +``` + +To adapt TiDB transactions, write a toolkit [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util) according to the following code: + +```go +package util + +import ( + "context" + "database/sql" +) + +type TiDBSqlTx struct { + *sql.Tx + conn *sql.Conn + pessimistic bool +} + +func TiDBSqlBegin(db *sql.DB, pessimistic bool) (*TiDBSqlTx, error) { + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return nil, err + } + if pessimistic { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "pessimistic") + } else { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "optimistic") + } + if err 
!= nil { + return nil, err + } + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + return &TiDBSqlTx{ + conn: conn, + Tx: tx, + pessimistic: pessimistic, + }, nil +} + +func (tx *TiDBSqlTx) Commit() error { + defer tx.conn.Close() + return tx.Tx.Commit() +} + +func (tx *TiDBSqlTx) Rollback() error { + defer tx.conn.Close() + return tx.Tx.Rollback() +} +``` + +`dao.go` defines a set of data manipulation methods to provide the ability to write data. This is also the core part of this example. + +```go +package main + +import ( + "database/sql" + "fmt" + "math/rand" + "strings" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" +) + +type Player struct { + ID string + Coins int + Goods int +} + +// createPlayer create a player +func createPlayer(db *sql.DB, player Player) error { + _, err := db.Exec(CreatePlayerSQL, player.ID, player.Coins, player.Goods) + return err +} + +// getPlayer get a player +func getPlayer(db *sql.DB, id string) (Player, error) { + var player Player + + rows, err := db.Query(GetPlayerSQL, id) + if err != nil { + return player, err + } + defer rows.Close() + + if rows.Next() { + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + return player, nil + } else { + return player, err + } + } + + return player, fmt.Errorf("can not found player") +} + +// getPlayerByLimit get players by limit +func getPlayerByLimit(db *sql.DB, limit int) ([]Player, error) { + var players []Player + + rows, err := db.Query(GetPlayerByLimitSQL, limit) + if err != nil { + return players, err + } + defer rows.Close() + + for rows.Next() { + player := Player{} + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + players = append(players, player) + } else { + return players, err + } + } + + return players, nil +} + +// bulk-insert players +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := util.TiDBSqlBegin(db, true) + if err != 
nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func getCount(db *sql.DB) (int, error) { + count := 0 + + rows, err := db.Query(GetCountSQL) + if err != nil { + return count, err + } + + defer rows.Close() + + if rows.Next() { + if err := rows.Scan(&count); err != nil { + return count, err + } + } + + return count, nil +} + +func buyGoods(db *sql.DB, sellID, buyID string, amount, price int) error { + var sellPlayer, buyPlayer Player + + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + buyExec := func() error { + stmt, err := tx.Prepare(GetPlayerWithLockSQL) + if err != nil { + return err + } + defer stmt.Close() + + sellRows, err := stmt.Query(sellID) + if err != nil { + return err + } + defer sellRows.Close() + + if sellRows.Next() { + if err := sellRows.Scan(&sellPlayer.ID, &sellPlayer.Coins, &sellPlayer.Goods); err != nil { + return err + } + } + sellRows.Close() + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + buyRows, err := stmt.Query(buyID) + if err != nil { + return err + } + defer buyRows.Close() + + if buyRows.Next() { + if err := buyRows.Scan(&buyPlayer.ID, &buyPlayer.Coins, &buyPlayer.Goods); err != nil { + return err + } + } + buyRows.Close() + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateStmt, err := tx.Prepare(UpdatePlayerSQL) + if err 
!= nil { + return err + } + defer updateStmt.Close() + + if _, err := updateStmt.Exec(-amount, price, sellID); err != nil { + return err + } + + if _, err := updateStmt.Exec(amount, -price, buyID); err != nil { + return err + } + + return nil + } + + err = buyExec() + if err == nil { + fmt.Println("\n[buyGoods]:\n 'trade success'") + tx.Commit() + } else { + tx.Rollback() + } + + return err +} + +func playerToArgs(players []Player) []interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return CreatePlayerSQL + strings.Repeat(",(?,?,?)", amount-1) +} + +func randomPlayers(amount int) []Player { + players := make([]Player, amount, amount) + for i := 0; i < amount; i++ { + players[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + return players +} +``` + +`sql.go` defines SQL statements as constants: + +```go +package main + +const ( + CreatePlayerSQL = "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + GetPlayerSQL = "SELECT id, coins, goods FROM player WHERE id = ?" + GetCountSQL = "SELECT count(*) FROM player" + GetPlayerWithLockSQL = GetPlayerSQL + " FOR UPDATE" + UpdatePlayerSQL = "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" + GetPlayerByLimitSQL = "SELECT id, coins, goods FROM player LIMIT ?" +) +``` + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Table initialization + + + +When using go-sql-driver/mysql, you need to initialize the database tables manually. 
If you are using a local cluster, and MySQL client has been installed locally, you can run it directly in the `sqldriver` directory: + +```shell +make mysql +``` + +Or you can execute the following command: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + + +When using go-sql-driver/mysql, you need to connect to your cluster and run the statement in the `sql/dbinit.sql` file to initialize the database tables manually. + + + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, modify the value of the `dsn` in `sqldriver.go`: + +```go +dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `mysql.RegisterTLSConfig` and `dsn` as follows: + +```go +mysql.RegisterTLSConfig("register-tidb-tls", &tls.Config { + MinVersion: tls.VersionTLS12, + ServerName: "xxx.tidbcloud.com", +}) + +dsn := "2aEp24QWEDLqRFs.root:123456@tcp(xxx.tidbcloud.com:4000)/test?charset=utf8mb4&tls=register-tidb-tls" +``` + +### Step 3.3 Run + +To run the code, you can run `make mysql`, `make build` and `make run` respectively: + +```shell +make mysql # this command executes `mysql --host 127.0.0.1 --port 4000 -u root + + +# Build a Simple CRUD App with TiDB and Hibernate + +This document describes how to use TiDB and Hibernate to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Java 8 or a later Java version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). 
+ +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB Cluster Using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +Compared with Hibernate, the JDBC implementation might be not a best practice, because you need to write error handling logic manually and cannot reuse code easily, which makes your code slightly redundant. + +Hibernate is a popular open-source Java ORM, and it supports TiDB dialect starting from `v6.0.0.Beta2`, which fits TiDB features well. The following instructions take `v6.0.0.Beta2` as an example. + +Change to the `plain-java-hibernate` directory: + +```shell +cd plain-java-hibernate +``` + +The structure of this directory is as follows: + +``` +. +├── Makefile +├── plain-java-hibernate.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── HibernateExample.java + └── resources + └── hibernate.cfg.xml +``` + +`hibernate.cfg.xml` is the Hibernate configuration file: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +`HibernateExample.java` is the main body of the `plain-java-hibernate`. Compared with JDBC, when using Hibernate, you only need to write the path of the configuration file, because Hibernate avoids differences in database creation between different databases. + +`PlayerDAO` is a class used to manage data, in which `DAO` means [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object). The class defines a set of data manipulation methods for writing data. 
Compared with JDBC, Hibernate encapsulates a large number of operations such as object mapping and CRUD of basic objects, which greatly simplifies the code. + +`PlayerBean` is a data entity class that is a mapping for tables. Each property of a `PlayerBean` corresponds to a field in the `player` table. Compared with JDBC, `PlayerBean` in Hibernate adds annotations to indicate mapping relationships for more information. + +```java +package com.pingcap; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; +import org.hibernate.JDBCException; +import org.hibernate.Session; +import org.hibernate.SessionFactory; +import org.hibernate.Transaction; +import org.hibernate.cfg.Configuration; +import org.hibernate.query.NativeQuery; +import org.hibernate.query.Query; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +@Entity +@Table(name = "player_hibernate") +class PlayerBean { + @Id + private String id; + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } +} + +/** + * Main class for the basic Hibernate example. 
+ **/ +public class HibernateExample +{ + public static class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we don't have to duplicate it in + // various places. + public Object runTransaction(Session session, Function fn) { + Object resultObject = null; + + Transaction txn = session.beginTransaction(); + try { + resultObject = fn.apply(session); + txn.commit(); + System.out.println("APP: COMMIT;"); + } catch (JDBCException e) { + System.out.println("APP: ROLLBACK BY JDBC ERROR;"); + txn.rollback(); + } catch (NotEnoughException e) { + System.out.printf("APP: ROLLBACK BY LOGIC; %s", e.getMessage()); + txn.rollback(); + } + return resultObject; + } + + public Function createPlayers(List players) throws JDBCException { + return session -> { + Integer addedPlayerAmount = 0; + for (PlayerBean player: players) { + session.persist(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) throws JDBCException { + return session -> { + PlayerBean sellPlayer = session.get(PlayerBean.class, sellId); + PlayerBean buyPlayer = session.get(PlayerBean.class, buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + session.persist(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + session.persist(sellPlayer); + + 
System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return 0; + }; + } + + public Function getPlayerByID(String id) throws JDBCException { + return session -> session.get(PlayerBean.class, id); + } + + public Function printPlayers(Integer limit) throws JDBCException { + return session -> { + NativeQuery limitQuery = session.createNativeQuery("SELECT * FROM player_hibernate LIMIT :limit", PlayerBean.class); + limitQuery.setParameter("limit", limit); + List players = limitQuery.getResultList(); + + for (PlayerBean player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() throws JDBCException { + return session -> { + Query countQuery = session.createQuery("SELECT count(player_hibernate) FROM PlayerBean player_hibernate", Long.class); + return countQuery.getSingleResult(); + }; + } + } + + public static void main(String[] args) { + // 1. Create a SessionFactory based on our hibernate.cfg.xml configuration + // file, which defines how to connect to the database. + SessionFactory sessionFactory + = new Configuration() + .configure("hibernate.cfg.xml") + .addAnnotatedClass(PlayerBean.class) + .buildSessionFactory(); + + try (Session session = sessionFactory.openSession()) { + // 2. And then, create DAO to manager your data. + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple example. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(session, playerDAO.createPlayers(Collections.singletonList( + new PlayerBean("test", 1, 1)))); + + // Get a player. + PlayerBean testPlayer = (PlayerBean)playerDAO.runTransaction(session, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. 
+ Long count = (Long)playerDAO.runTransaction(session, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(session, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 can't afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 have to reduce his incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } finally { + sessionFactory.close(); + } + } +} +``` + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Table initialization + +No need to initialize tables manually. 
+ +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, modify the `hibernate.connection.url`, `hibernate.connection.username`, `hibernate.connection.password` in `hibernate.cfg.xml`. + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the parameters as follows: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + 2aEp24QWEDLqRFs.root + 123456 + false + + + create-drop + + + true + true + + +``` + +### Step 3.3 Run + +To run the code, you can run `make build` and `make run` respectively: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar` +``` + +Or you can use the native commands: + +```shell +mvn clean package +java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar +``` + +Or run the `make` command directly, which is a combination of `make build` and `make run`. + +## Step 4. 
Expected output + +[Hibernate Expected Output](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-hibernate) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-jdbc.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-jdbc.md new file mode 100644 index 00000000..34d75cb9 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-jdbc.md @@ -0,0 +1,591 @@ +--- +title: Build a Simple CRUD App with TiDB and JDBC +summary: Learn how to build a simple CRUD application with TiDB and JDBC. +--- + + + + +# Build a Simple CRUD App with TiDB and JDBC + +This document describes how to use TiDB and JDBC to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Java 8 or a later Java version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB Cluster Using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +Change to the `plain-java-jdbc` directory: + +```shell +cd plain-java-jdbc +``` + +The structure of this directory is as follows: + +``` +. 
+├── Makefile +├── plain-java-jdbc.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── JDBCExample.java + └── resources + └── dbinit.sql +``` + +You can find initialization statements for the table creation in `dbinit.sql`: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`JDBCExample.java` is the main body of the `plain-java-jdbc`. TiDB is highly compatible with the MySQL protocol, so you need to initialize a MySQL source instance `MysqlDataSource` to connect to TiDB. Then, you can initialize `PlayerDAO` for object management and use it to read, edit, add, and delete data. + +`PlayerDAO` is a class used to manage data, in which `DAO` means [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object). The class defines a set of data manipulation methods to provide the ability to write data. + +`PlayerBean` is a data entity class that is a mapping for tables. Each property of a `PlayerBean` corresponds to a field in the `player` table. + +```java +package com.pingcap; + +import com.mysql.cj.jdbc.MysqlDataSource; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.*; + +/** + * Main class for the basic JDBC example. 
+ **/ +public class JDBCExample +{ + public static class PlayerBean { + private String id; + private Integer coins; + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } + } + + /** + * Data access object used by 'ExampleDataSource'. + * Example for CURD and bulk insert. + */ + public static class PlayerDAO { + private final MysqlDataSource ds; + private final Random rand = new Random(); + + PlayerDAO(MysqlDataSource ds) { + this.ds = ds; + } + + /** + * Create players by passing in a List of PlayerBean. 
+ * + * @param players Will create players list + * @return The number of create accounts + */ + public int createPlayers(List players){ + int rows = 0; + + Connection connection = null; + PreparedStatement preparedStatement = null; + try { + connection = ds.getConnection(); + preparedStatement = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)"); + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + + return -1; + } + + try { + for (PlayerBean player : players) { + preparedStatement.setString(1, player.getId()); + preparedStatement.setInt(2, player.getCoins()); + preparedStatement.setInt(3, player.getGoods()); + + preparedStatement.execute(); + rows += preparedStatement.getUpdateCount(); + } + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + } finally { + try { + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + System.out.printf("\n[createPlayers]:\n '%s'\n", preparedStatement); + return rows; + } + + /** + * Buy goods and transfer funds between one player and another in one transaction. + * @param sellId Sell player id. + * @param buyId Buy player id. + * @param amount Goods amount, if sell player has not enough goods, the trade will break. + * @param price Price should pay, if buy player has not enough coins, the trade will break. + * + * @return The number of effected players. 
+ */ + public int buyGoods(String sellId, String buyId, Integer amount, Integer price) { + int effectPlayers = 0; + + Connection connection = null; + try { + connection = ds.getConnection(); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + return effectPlayers; + } + + try { + connection.setAutoCommit(false); + + PreparedStatement playerQuery = connection.prepareStatement("SELECT * FROM player WHERE id=? OR id=? FOR UPDATE"); + playerQuery.setString(1, sellId); + playerQuery.setString(2, buyId); + playerQuery.execute(); + + PlayerBean sellPlayer = null; + PlayerBean buyPlayer = null; + + ResultSet playerQueryResultSet = playerQuery.getResultSet(); + while (playerQueryResultSet.next()) { + PlayerBean player = new PlayerBean( + playerQueryResultSet.getString("id"), + playerQueryResultSet.getInt("coins"), + playerQueryResultSet.getInt("goods") + ); + + System.out.println("\n[buyGoods]:\n 'check goods and coins enough'"); + System.out.println(player); + + if (sellId.equals(player.getId())) { + sellPlayer = player; + } else { + buyPlayer = player; + } + } + + if (sellPlayer == null || buyPlayer == null) { + throw new SQLException("player not exist."); + } + + if (sellPlayer.getGoods().compareTo(amount) < 0) { + throw new SQLException(String.format("sell player %s goods not enough.", sellId)); + } + + if (buyPlayer.getCoins().compareTo(price) < 0) { + throw new SQLException(String.format("buy player %s coins not enough.", buyId)); + } + + PreparedStatement transfer = connection.prepareStatement("UPDATE player set goods = goods + ?, coins = coins + ? 
WHERE id=?"); + transfer.setInt(1, -amount); + transfer.setInt(2, price); + transfer.setString(3, sellId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + transfer.setInt(1, amount); + transfer.setInt(2, -price); + transfer.setString(3, buyId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + connection.commit(); + + System.out.println("\n[buyGoods]:\n 'trade success'"); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + + try { + System.out.println("[buyGoods] Rollback"); + + connection.rollback(); + } catch (SQLException ex) { + // do nothing + } + } finally { + try { + connection.close(); + } catch (SQLException e) { + // do nothing + } + } + + return effectPlayers; + } + + /** + * Get the player info by id. + * + * @param id Player id. + * @return The player of this id. + */ + public PlayerBean getPlayer(String id) { + PlayerBean player = null; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player WHERE id = ?"); + preparedStatement.setString(1, id); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.printf("No players in the table with id %s", id); + } else { + player = new PlayerBean(res.getString("id"), res.getInt("coins"), res.getInt("goods")); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.getPlayer ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return player; + } + + /** + * Insert randomized account data (id, coins, goods) using the JDBC fast path for + * bulk inserts. The fastest way to get data into TiDB is using the + * TiDB Lightning(https://docs.pingcap.com/tidb/stable/tidb-lightning-overview). 
+ * However, if you must bulk insert from the application using INSERT SQL, the best + * option is the method shown here. It will require the following: + * + * Add `rewriteBatchedStatements=true` to your JDBC connection settings. + * Setting rewriteBatchedStatements to true now causes CallableStatements + * with batched arguments to be re-written in the form "CALL (...); CALL (...); ..." + * to send the batch in as few client/server round trips as possible. + * https://dev.mysql.com/doc/relnotes/connector-j/5.1/en/news-5-1-3.html + * + * You can see the `rewriteBatchedStatements` param effect logic at + * implement function: `com.mysql.cj.jdbc.StatementImpl.executeBatchUsingMultiQueries` + * + * @param total Add players amount. + * @param batchSize Bulk insert size for per batch. + * + * @return The number of new accounts inserted. + */ + public int bulkInsertRandomPlayers(Integer total, Integer batchSize) { + int totalNewPlayers = 0; + + try (Connection connection = ds.getConnection()) { + // We're managing the commit lifecycle ourselves, so we can + // control the size of our batch inserts. + connection.setAutoCommit(false); + + // In this example we are adding 500 rows to the database, + // but it could be any number. What's important is that + // the batch size is 128. 
+ try (PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) { + for (int i=0; i<=(total/batchSize);i++) { + for (int j=0; j %s row(s) updated in this batch\n", count.length); + } + connection.commit(); + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + return totalNewPlayers; + } + + + /** + * Print a subset of players from the data store by limit. + * + * @param limit Print max size. + */ + public void printPlayers(Integer limit) { + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player LIMIT ?"); + preparedStatement.setInt(1, limit); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + while (!res.next()) { + PlayerBean player = new PlayerBean(res.getString("id"), + res.getInt("coins"), res.getInt("goods")); + System.out.println("\n[printPlayers]:\n" + player); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.printPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } + + + /** + * Count players from the data store. 
+ * + * @return All players count + */ + public int countPlayers() { + int count = 0; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT count(*) FROM player"); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(res.next()) { + count = res.getInt(1); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.countPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return count; + } + } + + public static void main(String[] args) { + // 1. Configure the example database connection. + + // 1.1 Create a mysql data source instance. + MysqlDataSource mysqlDataSource = new MysqlDataSource(); + + // 1.2 Set server name, port, database name, username and password. + mysqlDataSource.setServerName("localhost"); + mysqlDataSource.setPortNumber(4000); + mysqlDataSource.setDatabaseName("test"); + mysqlDataSource.setUser("root"); + mysqlDataSource.setPassword(""); + + // Or you can use jdbc string instead. + // mysqlDataSource.setURL("jdbc:mysql://{host}:{port}/test?user={user}&password={password}"); + + // 2. And then, create DAO to manager your data. + PlayerDAO dao = new PlayerDAO(mysqlDataSource); + + // 3. Run some simple example. + + // Create a player, has a coin and a goods. + dao.createPlayers(Collections.singletonList(new PlayerBean("test", 1, 1))); + + // Get a player. + PlayerBean testPlayer = dao.getPlayer("test"); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Create players with bulk inserts, insert 1919 players totally, and per batch for 114 players. + int addedCount = dao.bulkInsertRandomPlayers(1919, 114); + System.out.printf("PlayerDAO.bulkInsertRandomPlayers:\n => %d total inserted players\n", addedCount); + + // Count players amount. 
+ int count = dao.countPlayers(); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + dao.printPlayers(3); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + addedCount = dao.createPlayers(Arrays.asList(player1, player2)); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 can't afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + int updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 10, 500); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 have to reduce his incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 2, 100); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Table initialization + + + +When using JDBC, you need to initialize the database tables manually. If you are using a local cluster, and MySQL client has been installed locally, you can run it directly in the `plain-java-jdbc` directory: + +```shell +make mysql +``` + +Or you can execute the following command: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + + +When using JDBC, you need to connect to your cluster and run the statement in the `src/main/resources/dbinit.sql` file to initialize the database tables manually. 
+ + + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, modify the parameters of the host, port, user, and password in `JDBCExample.java`: + +```java +mysqlDataSource.setServerName("localhost"); +mysqlDataSource.setPortNumber(4000); +mysqlDataSource.setDatabaseName("test"); +mysqlDataSource.setUser("root"); +mysqlDataSource.setPassword(""); +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the parameters as follows: + +```java +mysqlDataSource.setServerName("xxx.tidbcloud.com"); +mysqlDataSource.setPortNumber(4000); +mysqlDataSource.setDatabaseName("test"); +mysqlDataSource.setUser("2aEp24QWEDLqRFs.root"); +mysqlDataSource.setPassword("123456"); +mysqlDataSource.setSslMode(PropertyDefinitions.SslMode.VERIFY_IDENTITY.name()); +mysqlDataSource.setEnabledTLSProtocols("TLSv1.2,TLSv1.3"); +``` + +### Step 3.3 Run + +To run the code, you can run `make build` and `make run` respectively: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar` +``` + +Or you can use the native commands: + +```shell +mvn clean package +java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar +``` + +Or run the `make` command directly, which is a combination of `make build` and `make run`. + +## Step 4. 
Compared with [Mybatis](https://mybatis.org/mybatis-3/index.html), the JDBC implementation might not be a best practice, because you need to write error handling logic manually and cannot reuse code easily, which makes your code slightly redundant.
Once included in the Maven plugin, you can delete the old generated files and make new ones using `mvn mybatis-generator:generate`.
> The property `configuration.overwrite` in `mybatis-generator.xml` only ensures that the generated Java code files are overwritten. But the XML mapping files are still written as appended. Therefore, it is recommended to delete the old file before Mybatis Generator generates a new one.
Split the interface `PlayerMapperEx` additionally to extend from `PlayerMapper` and write a matching `PlayerMapperEx.xml` file. Avoid changing `PlayerMapper.java` and `PlayerMapper.xml` directly. This is to avoid these files being overwritten by Mybatis Generator.
The class defines a set of data manipulation methods for writing data. In it, Mybatis encapsulates a large number of operations such as object mapping and CRUD of basic objects, which greatly simplifies the code. + +```java +package com.pingcap.dao; + +import com.pingcap.model.Player; +import com.pingcap.model.PlayerMapperEx; +import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; + +import java.util.List; +import java.util.function.Function; + +public class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic, so we don't have to duplicate it in + // various places. + public Object runTransaction(SqlSessionFactory sessionFactory, Function fn) { + Object resultObject = null; + SqlSession session = null; + + try { + // open a session with autoCommit is false + session = sessionFactory.openSession(false); + + // get player mapper + PlayerMapperEx playerMapperEx = session.getMapper(PlayerMapperEx.class); + + resultObject = fn.apply(playerMapperEx); + session.commit(); + System.out.println("APP: COMMIT;"); + } catch (Exception e) { + if (e instanceof NotEnoughException) { + System.out.printf("APP: ROLLBACK BY LOGIC; \n%s\n", e.getMessage()); + } else { + System.out.printf("APP: ROLLBACK BY ERROR; \n%s\n", e.getMessage()); + } + + if (session != null) { + session.rollback(); + } + } finally { + if (session != null) { + session.close(); + } + } + + return resultObject; + } + + public Function createPlayers(List players) { + return playerMapperEx -> { + Integer addedPlayerAmount = 0; + for (Player player: players) { + playerMapperEx.insert(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, 
Integer amount, Integer price) { + return playerMapperEx -> { + Player sellPlayer = playerMapperEx.selectByPrimaryKeyWithLock(sellId); + Player buyPlayer = playerMapperEx.selectByPrimaryKeyWithLock(buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + int affectRows = 0; + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + affectRows += playerMapperEx.updateByPrimaryKey(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + affectRows += playerMapperEx.updateByPrimaryKey(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return affectRows; + }; + } + + public Function getPlayerByID(String id) { + return playerMapperEx -> playerMapperEx.selectByPrimaryKey(id); + } + + public Function printPlayers(Integer limit) { + return playerMapperEx -> { + List players = playerMapperEx.selectByLimit(limit); + + for (Player player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() { + return PlayerMapperEx::count; + } +} +``` + +`MybatisExample` is the main class of the `plain-java-mybatis` sample application. 
It defines the entry functions: + +```java +package com.pingcap; + +import com.pingcap.dao.PlayerDAO; +import com.pingcap.model.Player; +import org.apache.ibatis.io.Resources; +import org.apache.ibatis.session.SqlSessionFactory; +import org.apache.ibatis.session.SqlSessionFactoryBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; + +public class MybatisExample { + public static void main( String[] args ) throws IOException { + // 1. Create a SqlSessionFactory based on our mybatis-config.xml configuration + // file, which defines how to connect to the database. + InputStream inputStream = Resources.getResourceAsStream("mybatis-config.xml"); + SqlSessionFactory sessionFactory = new SqlSessionFactoryBuilder().build(inputStream); + + // 2. And then, create DAO to manager your data + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(sessionFactory, playerDAO.createPlayers( + Collections.singletonList(new Player("test", 1, 1)))); + + // Get a player. + Player testPlayer = (Player)playerDAO.runTransaction(sessionFactory, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Integer count = (Integer)playerDAO.runTransaction(sessionFactory, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(sessionFactory, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. 
+ Player player1 = new Player("1", 100, 0); + Player player2 = new Player("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Table initialization + +When using Mybatis, you need to initialize the database tables manually. If you are using a local cluster, and MySQL client has been installed locally, you can run it directly in the `plain-java-mybatis` directory: + +```shell +make prepare +``` + +Or you can execute the following command: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +``` + +If you are using a non-local cluster or MySQL client has not been installed, connect to your cluster and run the statement in the `src/main/resources/dbinit.sql` file. 
+ +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, modify the `dataSource.url`, `dataSource.username`, `dataSource.password` in `mybatis-config.xml`. + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the parameters in `dataSource` node as follows: + +```xml + + + + + ... + + + + + + + + ... + + +``` + +### Step 3.3 Run + +To run the code, you can run `make prepare`, `make gen`, `make build` and `make run` respectively: + +```shell +make prepare +# this command executes : +# - `mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql` +# - `mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player"` + +make gen +# this command executes : +# - `rm -f src/main/java/com/pingcap/model/Player.java` +# - `rm -f src/main/java/com/pingcap/model/PlayerMapper.java` +# - `rm -f src/main/resources/mapper/PlayerMapper.xml` +# - `mvn mybatis-generator:generate` + +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar` +``` + +Or you can use the native commands: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player" +rm -f src/main/java/com/pingcap/model/Player.java +rm -f src/main/java/com/pingcap/model/PlayerMapper.java +rm -f src/main/resources/mapper/PlayerMapper.xml +mvn mybatis-generator:generate +mvn clean package +java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar +``` + +Or run the `make` command directly, which is a combination of `make prepare`, `make gen`, `make build` and `make 
run`. + +## Step 4. Expected output + +[Mybatis Expected Output](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-mybatis) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-spring-boot.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-spring-boot.md new file mode 100644 index 00000000..df23c8d2 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-java-spring-boot.md @@ -0,0 +1,1024 @@ +--- +title: Build a TiDB App Using Spring Boot +summary: Learn an example of how to build a TiDB application using Spring Boot. +aliases: ['/tidbcloud/dev-guide-sample-application-spring-boot','/tidb/stable/dev-guide-sample-application-spring-boot'] +--- + + + +# Build a TiDB App Using Spring Boot + +This tutorial shows you how to build a [Spring Boot](https://spring.io/projects/spring-boot) web application using TiDB. The [Spring Data JPA](https://spring.io/projects/spring-data-jpa) module is used as the framework for data access capabilities. You can download the code for this sample application from [GitHub](https://github.com/pingcap-inc/tidb-example-java). + +This is a sample application for building a RESTful API, which shows a generic **Spring Boot** backend service using **TiDB** as the database. The following process was designed to recreate a real-world scenario: + +This is an example of a game where each player has two attributes: `coins` and `goods`. Each player is uniquely identified by an `id` field. Players can trade freely if they have sufficient coins and goods. + +You can build your own application based on this example. + +## Step 1: Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. 
To install **Maven**, run one of the following commands based on your operating system:
Accordingly, the parameters must be set as follows:
If you are not already in this directory, navigate to the directory with the following command: + +```shell +cd /tidb-example-java/spring-jpa-hibernate +``` + +#### Build and run with Make (recommended) + +```shell +make +``` + +#### Build and run manually + +If you prefer to build manually, follow these steps: + +1. Clear cache and package: + + {{< copyable "shell-regular" >}} + + ```shell + mvn clean package + ``` + +2. Run applications with JAR files: + + {{< copyable "shell-regular" >}} + + ```shell + java -jar target/spring-jpa-hibernate-0.0.1.jar + ``` + +### Step 5.3 Output + +The final part of the output should look like the following: + +``` + . ____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v3.0.1) + +2023-01-05T14:06:54.427+08:00 INFO 22005 --- [ main] com.pingcap.App : Starting App using Java 17.0.2 with PID 22005 (/Users/cheese/IdeaProjects/tidb-example-java/spring-jpa-hibernate/target/classes started by cheese in /Users/cheese/IdeaProjects/tidb-example-java) +2023-01-05T14:06:54.428+08:00 INFO 22005 --- [ main] com.pingcap.App : No active profile set, falling back to 1 default profile: "default" +2023-01-05T14:06:54.642+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data JPA repositories in DEFAULT mode. +2023-01-05T14:06:54.662+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 17 ms. Found 1 JPA repository interfaces. 
+2023-01-05T14:06:54.830+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http) +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat] +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/10.1.4] +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 421 ms +2023-01-05T14:06:54.916+08:00 INFO 22005 --- [ main] o.hibernate.jpa.internal.util.LogHelper : HHH000204: Processing PersistenceUnitInfo [name: default] +2023-01-05T14:06:54.929+08:00 INFO 22005 --- [ main] org.hibernate.Version : HHH000412: Hibernate ORM core version 6.1.6.Final +2023-01-05T14:06:54.969+08:00 WARN 22005 --- [ main] org.hibernate.orm.deprecation : HHH90000021: Encountered deprecated setting [javax.persistence.sharedCache.mode], use [jakarta.persistence.sharedCache.mode] instead +2023-01-05T14:06:55.005+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting... +2023-01-05T14:06:55.074+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.pool.HikariPool : HikariPool-1 - Added connection com.mysql.cj.jdbc.ConnectionImpl@5e905f2c +2023-01-05T14:06:55.075+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed. 
+2023-01-05T14:06:55.089+08:00 INFO 22005 --- [ main] SQL dialect : HHH000400: Using dialect: org.hibernate.dialect.TiDBDialect +Hibernate: drop table if exists player_jpa +Hibernate: drop sequence player_jpa_id_seq +Hibernate: create sequence player_jpa_id_seq start with 1 increment by 1 +Hibernate: create table player_jpa (id bigint not null, coins integer, goods integer, primary key (id)) engine=InnoDB +2023-01-05T14:06:55.332+08:00 INFO 22005 --- [ main] o.h.e.t.j.p.i.JtaPlatformInitiator : HHH000490: Using JtaPlatform implementation: [org.hibernate.engine.transaction.jta.platform.internal.NoJtaPlatform] +2023-01-05T14:06:55.335+08:00 INFO 22005 --- [ main] j.LocalContainerEntityManagerFactoryBean : Initialized JPA EntityManagerFactory for persistence unit 'default' +2023-01-05T14:06:55.579+08:00 WARN 22005 --- [ main] JpaBaseConfiguration$JpaWebConfiguration : spring.jpa.open-in-view is enabled by default. Therefore, database queries may be performed during view rendering. Explicitly configure spring.jpa.open-in-view to disable this warning +2023-01-05T14:06:55.710+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path '' +2023-01-05T14:06:55.714+08:00 INFO 22005 --- [ main] com.pingcap.App : Started App in 1.432 seconds (process running for 1.654) +``` + +The output log indicates the application behavior during startup. In this example, the application starts a **Servlet** using [Tomcat](https://tomcat.apache.org/), uses Hibernate as the ORM, uses [HikariCP](https://github.com/brettwooldridge/HikariCP) as the database connection pool implementation, and uses `org.hibernate.dialect.TiDBDialect` as the database dialect. After startup, Hibernate deletes and re-creates the `player_jpa` table and the `player_jpa_id_seq` sequence. At the end of startup, the application listens on port `8080` to provide HTTP services to the outside. 
After the service is up and running, you can send the HTTP requests to the backend application. `http://localhost:8080` is the base URL that provides services. This tutorial uses a series of HTTP requests to show how to use the service.
+ +![Postman-GetByPage](/media/develop/IMG_20220402-003528474.png) + +#### Count players + +Click the **Count** tab and the **Send** button to send a GET request to `http://localhost:8080/player/count`. The return value is the number of players. + +![Postman-Count](/media/develop/IMG_20220402-003549966.png) + +#### Player trading + +Click on the **Trade** tab and the **Send** button to send a PUT request to `http://localhost:8080/player/trade`. The request parameters are the seller's ID `sellID`, the buyer's ID `buyID`, the number of goods purchased `amount`, the number of coins consumed for the purchase `price`. + +The return value is whether the transaction is successful or not. When there are insufficient goods for the seller, insufficient coins for the buyer, or a database error, the [database transaction](/develop/dev-guide-transaction-overview.md) guarantees that the trade is not successful and no player's coins or goods are lost. + +![Postman-Trade](/media/develop/IMG_20220402-003659102.png) + +### Step 6.2 Using curl requests + +You can also use curl to make requests directly. + +#### Create players + +To create players, you can send a **POST** request to the `/player` endpoint. For example: + +```shell +curl --location --request POST 'http://localhost:8080/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' +``` + +The request uses JSON as the payload. The example above indicates creating a player with 100 `coins` and 20 `goods`. The return value is the number of players created. + +```json +1 +``` + +#### Get player information by ID + +To get the player information, you can send a **GET** request to the `/player` endpoint. You need to specify the `id` of the player in the path parameter as follows: `/player/{id}`. 
The following example shows how to get the information of a player with `id` 1: + +```shell +curl --location --request GET 'http://localhost:8080/player/1' +``` + +The return value is the player's information: + +```json +{ + "coins": 200, + "goods": 10, + "id": 1 +} +``` + +#### Get player information in bulk by limit + +To get the player information in bulk, you can send a **GET** request to the `/player/limit` endpoint. You need to specify the total number of players in the path parameter as follows: `/player/limit/{limit}`. The following example shows how to get the information of up to 3 players: + +```shell +curl --location --request GET 'http://localhost:8080/player/limit/3' +``` + +The return value is a list of player information: + +```json +[ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } +] +``` + +#### Get player information by page + +To get paginated player information, you can send a **GET** request to the `/player/page` endpoint. To specify additional parameters, you need to use the URL parameter. The following example shows how to get the information from a page whose `index` is 0, where each page has a maximum `size` of 2 players. + +```shell +curl --location --request GET 'http://localhost:8080/player/page?index=0&size=2' +``` + +The return value is the page with `index` 0, where 2 players are listed per page. In addition, the return value contains pagination information such as offset, total pages, and whether the results are sorted. 
+ +```json +{ + "content": [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + } + ], + "empty": false, + "first": true, + "last": false, + "number": 0, + "numberOfElements": 2, + "pageable": { + "offset": 0, + "pageNumber": 0, + "pageSize": 2, + "paged": true, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "unpaged": false + }, + "size": 2, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "totalElements": 4, + "totalPages": 2 +} +``` + +#### Count players + +To get the number of players, you can send a **GET** request to the `/player/count` endpoint: + +```shell +curl --location --request GET 'http://localhost:8080/player/count' +``` + +The return value is the number of players: + +```json +4 +``` + +#### Player trading + +To initiate a transaction between players, you can send a **PUT** request to the `/player/trade` endpoint. For example: + +```shell +curl --location --request PUT 'http://localhost:8080/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' +``` + +The request uses **Form Data** as the payload. The example request indicates that the seller's ID (`sellID`) is 1, the buyer's ID (`buyID`) is 2, the number of goods purchased (`amount`) is 10, and the number of coins consumed for purchase (`price`) is 100. + +The return value is whether the transaction is successful or not. When there are insufficient goods for the seller, insufficient coins for the buyer, or a database error, the [database transaction](/develop/dev-guide-transaction-overview.md) guarantees that the trade is not successful and no player's coins or goods are lost. 
+ +```json +true +``` + +### Step 6.3 Requests with Shell script + +You can download [this shell script](https://github.com/pingcap-inc/tidb-example-java/blob/main/spring-jpa-hibernate/request.sh) for testing purposes. The script performs the following operations: + +1. Create 10 players in a loop. +2. Get the information of players with the `id` of 1. +3. Get a list of up to 3 players. +4. Get a page of players with the `index` of 0 and the `size` of 2. +5. Get the total number of players. +6. Perform a transaction, where the player with the `id` of 1 is the seller and the player with the `id` of 2 is the buyer, and 10 `goods` are purchased at the cost of 100 `coins`. + +You can run this script with `make request` or `./request.sh`. The result should look like this: + +```shell +cheese@CheesedeMacBook-Pro spring-jpa-hibernate % make request +./request.sh +loop to create 10 players: +1111111111 + +get player 1: +{"id":1,"coins":200,"goods":10} + +get players by limit 3: +[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30},{"id":3,"coins":100,"goods":20}] + +get first players: +{"content":[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30}],"pageable":{"sort":{"empty":true,"unsorted":true,"sorted":false},"offset":0,"pageNumber":0,"pageSize":2,"paged":true,"unpaged":false},"last":false,"totalPages":7,"totalElements":14,"first":true,"size":2,"number":0,"sort":{"empty":true,"unsorted":true,"sorted":false},"numberOfElements":2,"empty":false} + +get players count: +14 + +trade by two players: +false +``` + +## Implementation details + +This subsection describes the components in the sample application project. + +### Overview + +The catalog tree for this example project is shown below (some incomprehensible parts are removed): + +``` +. 
+├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ ├── App.java + │ ├── controller + │ │ └── PlayerController.java + │ ├── dao + │ │ ├── PlayerBean.java + │ │ └── PlayerRepository.java + │ └── service + │ ├── PlayerService.java + │ └── impl + │ └── PlayerServiceImpl.java + └── resources + └── application.yml +``` + +- `pom.xml` declares the project's Maven configuration, such as dependencies and packaging. +- `application.yml` declares the project's user configuration, such as database address, password, and database dialect used. +- `App.java` is the entry point of the project. +- `controller` is the package that exposes the HTTP interface to the outside. +- `service` is the package that implements the interface and logic of the project. +- `dao` is the package that implements the connection to the database and the persistence of the data. + +### Configuration + +This part briefly describes the Maven configuration in the `pom.xml` file and the user configuration in the `application.yml` file. + +#### Maven configuration + +The `pom.xml` file is a Maven configuration file that declares the project's Maven dependencies, packaging methods, and packaging information. You can replicate the process of generating this configuration file by [creating a blank application with the same dependency](#create-a-blank-application-with-the-same-dependency-optional), or copying it directly to your project. 
+ +```xml + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.0.1 + + + + com.pingcap + spring-jpa-hibernate + 0.0.1 + spring-jpa-hibernate + an example for spring boot, jpa, hibernate and TiDB + + + 17 + 17 + 17 + + + + + org.springframework.boot + spring-boot-starter-data-jpa + + + + org.springframework.boot + spring-boot-starter-web + + + + mysql + mysql-connector-java + runtime + + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +#### User configuration + +The `application.yml` configuration file declares the user configuration, such as database address, password, and the database dialect used. + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +The configuration is written in [YAML](https://yaml.org/). The fields are described as follows: + +- `spring.datasource.url` : URL of the database connection. +- `spring.datasource.username` : the database username. +- `spring.datasource.password` : the database password. Empty. You need to comment out or delete this field. +- `spring.datasource.driver-class-name` : the database driver. Because TiDB is compatible with MySQL, use a mysql-connector-java driver class `com.mysql.cj.jdbc`. +- `jpa.show-sql` : when this field is set to `true`, the SQL statements run by JPA are printed. +- `jpa.database-platform` : the selected database dialect. Because the application connects to TiDB, choose **TiDB dialect**. Note that this dialect is only available in Hibernate `6.0.0.Beta2` and later versions, so choose the applicable dependency version. +- `jpa.hibernate.ddl-auto` : `create-drop` creates the table at the beginning of the program and deletes the table on exit. 
Do not set this option in a production environment. Because this is a sample application, this option is set to minimize the impact on the database data. + +### Entry point + +The `App.java` file is the entry point: + +```java +package com.pingcap; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.ApplicationPidFileWriter; + +@SpringBootApplication +public class App { + public static void main(String[] args) { + SpringApplication springApplication = new SpringApplication(App.class); + springApplication.addListeners(new ApplicationPidFileWriter("spring-jpa-hibernate.pid")); + springApplication.run(args); + } +} +``` + +The entry class starts with the standard configuration annotation [`@SpringBootApplication`](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/autoconfigure/SpringBootApplication.html) for Spring Boot applications. For more information, see [Using the `@SpringBootApplication` Annotation](https://docs.spring.io/spring-boot/docs/current/reference/html/using-spring-boot.html#using-boot-using-springbootapplication-annotation) in the Spring Boot official documentation. Then, the program uses the `ApplicationPidFileWriter` to write a PID (process identification number) file called `spring-jpa-hibernate.pid` during application startup. The PID file can be used to close this application from an external source. + +### Data access object + +The `dao` (Data Access Object) package implements the persistence of data objects. + +#### Entity objects + +The `PlayerBean.java` file is an entity object, which corresponds to a table in the database: + +```java +package com.pingcap.dao; + +import jakarta.persistence.*; + +/** + * it's core entity in hibernate + * @Table appoint to table name + */ +@Entity +@Table(name = "player_jpa") +public class PlayerBean { + /** + * @ID primary key + * @GeneratedValue generated way. 
this field will use generator named "player_id" + * @SequenceGenerator using `sequence` feature to create a generator, + * and it named "player_jpa_id_seq" in database, initial form 1 (by `initialValue` + * parameter default), and every operator will increase 1 (by `allocationSize`) + */ + @Id + @GeneratedValue(generator="player_id") + @SequenceGenerator(name="player_id", sequenceName="player_jpa_id_seq", allocationSize=1) + private Long id; + + /** + * @Column field + */ + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +The entity class has several annotations that give Hibernate additional information to bind the entity class to the table. + +- `@Entity` declares that `PlayerBean` is an entity class. +- `@Table` relates this entity class to the `player_jpa` table using the annotation attribute `name`. +- `@Id` declares that this property is related to the primary key column of the table. +- `@GeneratedValue` indicates that the value of this column is generated automatically and should not be set manually. The attribute `generator` is used to specify the name of the generator as `player_id`. +- `@SequenceGenerator` declares a generator that uses [sequence](/sql-statements/sql-statement-create-sequence.md), and uses the annotation attribute `name` to declare the name of the generator as `player_id` (consistent with the name specified in `@GeneratedValue`). The annotation attribute `sequenceName` is used to specify the name of the sequence in the database. Finally, the annotation attribute `allocationSize` is used to declare the sequence's step size to be 1. 
+- `@Column` declares each private attribute as a column of the `player_jpa` table, and uses the annotation attribute `name` to determine the name of the column corresponding to the attribute. + +#### Repository + +To abstract the database layer, Spring applications use the [`Repository`](https://docs.spring.io/spring-data/jpa/docs/current/reference/html/#repositories) interface, or a sub-interface of the `Repository`. This interface maps to a database object, such as a table. JPA implements some pre-built methods, such as [`INSERT`](/sql-statements/sql-statement-insert.md), or [`SELECT`](/sql-statements/sql-statement-select.md) using the primary key. + +```java +package com.pingcap.dao; + +import jakarta.persistence.LockModeType; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Lock; +import org.springframework.data.jpa.repository.Query; +import org.springframework.data.repository.query.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface PlayerRepository extends JpaRepository { + /** + * use HQL to query by page + * @param pageable a pageable parameter required by hibernate + * @return player list package by page message + */ + @Query(value = "SELECT player_jpa FROM PlayerBean player_jpa") + Page getPlayersByPage(Pageable pageable); + + /** + * use SQL to query by limit, using named parameter + * @param limit sql parameter + * @return player list (max size by limit) + */ + @Query(value = "SELECT * FROM player_jpa LIMIT :limit", nativeQuery = true) + List getPlayersByLimit(@Param("limit") Integer limit); + + /** + * query player and add a lock for update + * @param id player id + * @return player + */ + @Lock(value = LockModeType.PESSIMISTIC_WRITE) + @Query(value = "SELECT player FROM PlayerBean player WHERE player.id = :id") + // @Query(value =
"SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) + PlayerBean getPlayerAndLock(@Param("id") Long id); +} +``` + +The `PlayerRepository` interface extends the `JpaRepository` interface used by Spring for JPA data access. The `@Query` annotation is used to tell Hibernate how to implement queries in this interface. Two query syntaxes are used: + +* In the `getPlayersByPage` interface, [Hibernate Query Language](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#hql) (HQL) is used. +* In the `getPlayersByLimit` interface, native SQL is used. When the interface uses the native SQL syntax, the `@Query` annotation parameter `nativeQuery` must be set to `true`. + +In the SQL for the `getPlayersByLimit` annotation, `:limit` is called a [named parameter](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#jpql-query-parameters) in Hibernate. Hibernate automatically finds and splices the parameter by name within the interface where the annotation resides. You can also use `@Param` to specify a name different from the parameter for injection. + +In `getPlayerAndLock`, an annotation [`@Lock`](https://docs.spring.io/spring-data/jpa/docs/current/api/org/springframework/data/jpa/repository/Lock.html) is used to declare that pessimistic locking is applied. For details on other locking methods, see [Entity Locking](https://openjpa.apache.org/builds/2.2.2/apache-openjpa/docs/jpa_overview_em_locking.html). The `@Lock` annotation must be used with `HQL`; otherwise, an error occurs. If you want to use SQL directly for locking, you can use the annotation from the comment: + +```java +@Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) +``` + +The SQL statement above uses `FOR UPDATE` to add locks directly. You can also dive deeper into the principles with the TiDB [`SELECT` statement](/sql-statements/sql-statement-select.md). 
+ +### Logic implementation + +The logic implementation layer is the `service` package, which contains the interfaces and logic implemented by the project. + +#### Interface + +The `PlayerService.java` file defines the logical interface and implements the interface rather than writing a class directly. This is to keep the example as close to actual use as possible and to reflect the [open-closed principle](https://en.wikipedia.org/wiki/Open%E2%80%93closed_principle) of the design. You may omit this interface and inject the implementation class directly in the dependency class, but this approach is not recommended. + +```java +package com.pingcap.service; + +import com.pingcap.dao.PlayerBean; +import org.springframework.data.domain.Page; + +import java.util.List; + +public interface PlayerService { + /** + * create players by passing in a List of PlayerBean + * + * @param players will create players list + * @return The number of create accounts + */ + Integer createPlayers(List players); + + /** + * buy goods and transfer funds between one player and another in one transaction + * @param sellId sell player id + * @param buyId buy player id + * @param amount goods amount, if sell player has not enough goods, the trade will break + * @param price price should pay, if buy player has not enough coins, the trade will break + */ + void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException; + + /** + * get the player info by id. + * + * @param id player id + * @return the player of this id + */ + PlayerBean getPlayerByID(Long id); + + /** + * get a subset of players from the data store by limit. + * + * @param limit return max size + * @return player list + */ + List getPlayers(Integer limit); + + /** + * get a page of players from the data store. + * + * @param index page index + * @param size page size + * @return player list + */ + Page getPlayersByPage(Integer index, Integer size); + + /** + * count players from the data store. 
+ * + * @return all players count + */ + Long countPlayers(); +} +``` + +#### Implementation (Important) + +The `PlayerServiceImpl.java` file implements the `PlayerService` interface, which contains all the data processing logic. + +```java +package com.pingcap.service.impl; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.dao.PlayerRepository; +import com.pingcap.service.PlayerService; +import jakarta.transaction.Transactional; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.PageRequest; +import org.springframework.stereotype.Service; + +import java.util.List; + +/** + * PlayerServiceImpl implements PlayerService interface + * @Transactional it means every method in this class, will package by a pair of + * transaction.begin() and transaction.commit(). and it will be call + * transaction.rollback() when method throw an exception + */ +@Service +@Transactional +public class PlayerServiceImpl implements PlayerService { + @Autowired + private PlayerRepository playerRepository; + + @Override + public Integer createPlayers(List players) { + return playerRepository.saveAll(players).size(); + } + + @Override + public void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException { + PlayerBean buyPlayer = playerRepository.getPlayerAndLock(buyId); + PlayerBean sellPlayer = playerRepository.getPlayerAndLock(sellId); + if (buyPlayer == null || sellPlayer == null) { + throw new RuntimeException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new RuntimeException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + playerRepository.save(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + 
+ playerRepository.save(sellPlayer); + } + + @Override + public PlayerBean getPlayerByID(Long id) { + return playerRepository.findById(id).orElse(null); + } + + @Override + public List getPlayers(Integer limit) { + return playerRepository.getPlayersByLimit(limit); + } + + @Override + public Page getPlayersByPage(Integer index, Integer size) { + return playerRepository.getPlayersByPage(PageRequest.of(index, size)); + } + + @Override + public Long countPlayers() { + return playerRepository.count(); + } +} +``` + +The `@Service` annotation is used to declare that the lifecycle of this object is managed by `Spring`. + +The `PlayerServiceImpl` implementation class also has a [`@Transactional`](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#transaction-declarative-annotations) annotation in addition to the `@Service` annotation. When transaction management is enabled in the application (which can be turned on using [`@EnableTransactionManagement`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/transaction/annotation/EnableTransactionManagement.html), but is turned on by default by `Spring Boot`. You do not need to manually configure it.), `Spring` automatically wraps all objects with the `@Transactional` annotation in a proxy and uses this proxy for object invocation processing. + +You can simply assume that when the agent calls a function inside an object with the `@Transactional` annotation: + +- At the top of the function, it starts the transaction with `transaction.begin()`. +- When the function returns, it calls `transaction.commit()` to commit the transaction. +- When any runtime error occurs, the agent calls `transaction.rollback()` to roll back.
+ +You can refer to [Database Transactions](/develop/dev-guide-transaction-overview.md) for more information on transactions, or read [Understanding the Spring Framework's Declarative Transaction Implementation](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#tx-decl-explained) on the `Spring` website. + +In all implementation classes, the `buyGoods` function requires attention. When the function encounters an illogical operation, it throws an exception and directs Hibernate to perform a transaction rollback to prevent incorrect data. + +### External HTTP Interface + +The `controller` package exposes the HTTP interface to the outside world and allows access to the service via the [REST API](https://www.redhat.com/en/topics/api/what-is-a-rest-api#). + +```java +package com.pingcap.controller; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.service.PlayerService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import org.springframework.lang.NonNull; +import org.springframework.web.bind.annotation.*; + +import java.util.List; + +@RestController +@RequestMapping("/player") +public class PlayerController { + @Autowired + private PlayerService playerService; + + @PostMapping + public Integer createPlayer(@RequestBody @NonNull List playerList) { + return playerService.createPlayers(playerList); + } + + @GetMapping("/{id}") + public PlayerBean getPlayerByID(@PathVariable Long id) { + return playerService.getPlayerByID(id); + } + + @GetMapping("/limit/{limit_size}") + public List getPlayerByLimit(@PathVariable("limit_size") Integer limit) { + return playerService.getPlayers(limit); + } + + @GetMapping("/page") + public Page getPlayerByPage(@RequestParam Integer index, @RequestParam("size") Integer size) { + return playerService.getPlayersByPage(index, size); + } + + @GetMapping("/count") + public Long getPlayersCount() { + return playerService.countPlayers(); + 
} + + @PutMapping("/trade") + public Boolean trade(@RequestParam Long sellID, @RequestParam Long buyID, @RequestParam Integer amount, @RequestParam Integer price) { + try { + playerService.buyGoods(sellID, buyID, amount, price); + } catch (RuntimeException e) { + return false; + } + + return true; + } +} +``` + +`PlayerController` uses annotations as many as possible to demonstrate features. In real projects, keep the style consistent while following the rules of your company or team. The annotations in `PlayerController` are explained as follows: + +- [`@RestController`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RestController.html) declares `PlayerController` as a [Web Controller](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller) and serializes the return value as `JSON` output. +- [`@RequestMapping`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestMapping.html) maps the URL endpoint to `/player`, that is, this `Web Controller` only listens for requests sent to the `/player` URL. +- `@Autowired` means `Spring` container can autowire relationships between collaborating beans. The declaration requires a `PlayerService` object, which is an interface and does not specify which implementation class to use. This is assembled by Spring. For the rules of this assembly, see [The IoC container](https://docs.spring.io/spring-framework/docs/3.2.x/spring-framework-reference/html/beans.html) on Spring's official website. +- [`@PostMapping`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PostMapping.html) declares that this function responds to a [POST](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST) request in HTTP. + - `@RequestBody` declares that the entire HTTP payload is parsed into the `playerList` parameter. 
+ - `@NonNull` declares that the parameter must not be null; otherwise, it returns an error. +- [`@GetMapping`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/GetMapping.html) declares that this function responds to a [GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET) request in HTTP. + - [`@PathVariable`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PathVariable.html) shows that the annotation has placeholders like `{id}` and `{limit_size}`, which are bound to the variable annotated by `@PathVariable`. Such binding is based on the annotation attribute `name`. If the annotation attribute `name` is not specified, it is the same as the variable name. The variable name can be omitted, that is, `@PathVariable(name="limit_size")` can be written as `@PathVariable("limit_size")`. +- [`@PutMapping`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PutMapping.html) declares that this function responds to a [PUT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PUT) request in HTTP. +- [`@RequestParam`](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestParam.html) declares that this function parses URL parameters, form parameters, and other parameters in the request and binds them to the annotated variables. + +## Create a blank application with the same dependency (optional) + +This application is built using [Spring Initializr](https://start.spring.io/). 
You can quickly get a blank application with the same dependencies as this sample application by clicking on the following options and changing a few configuration items: + +**Project** + +- Maven Project + +**Language** + +- Java + +**Spring Boot** + +- Latest stable version + +**Project Metadata** + +- Group: com.pingcap +- Artifact: spring-jpa-hibernate +- Name: spring-jpa-hibernate +- Package name: com.pingcap +- Packaging: Jar +- Java: 17 + +**Dependencies** + +- Spring Web +- Spring Data JPA +- MySQL Driver + +The complete configuration is as follows: + +![Spring Initializr Configuration](/media/develop/develop-spring-initializr-configuration.png) + +> **Note:** +> +> Although SQL is relatively standardized, each database vendor uses a subset and superset of ANSI SQL defined syntax. This is referred to as the database's dialect. Hibernate handles variations across these dialects through its `org.hibernate.dialect.Dialect` class and the various subclasses for each database vendor. +> +> In most cases, Hibernate will be able to determine the proper Dialect to use by asking some questions of the JDBC Connection during bootstrap. For information on Hibernate's ability to determine the proper Dialect to use (and your ability to influence that resolution), see [Dialect resolution](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#portability-dialectresolver). +> +> If for some reason it is not able to determine the proper one or you want to use a custom Dialect, you will need to set the `hibernate.dialect` setting. +> +> _—— Excerpt from the Hibernate official documentation: [Database Dialect](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#database-dialect)_ + +After the configuration, you can get a blank **Spring Boot** application with the same dependencies as the sample application. 
\ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysql-connector.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysql-connector.md new file mode 100644 index 00000000..6d7a2c30 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysql-connector.md @@ -0,0 +1,291 @@ +--- +title: Build a Simple CRUD App with TiDB and MySQL Connector/Python +summary: Learn how to build a simple CRUD application with TiDB and MySQL Connector/Python. +aliases: ['/tidb/v6.5/dev-guide-sample-application-python','/tidb/stable/dev-guide-sample-application-python','/tidbcloud/dev-guide-sample-application-python','/tidb/v6.5/dev-guide-outdated-for-python-mysql-connector'] +--- + + + + +# Build a Simple CRUD App with TiDB and MySQL Connector/Python + +[MySQL Connector/Python](https://dev.mysql.com/doc/connector-python/en/) is a popular open-source driver for Python. + +This document describes how to use TiDB and MySQL Connector/Python to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Python 3.10 or a later Python version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB cluster using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. 
Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +The following uses MySQL Connector/Python 8.0.31 as an example. Drivers for Python are more convenient to use than other languages, but they do not shield the underlying implementation and require manual management of transactions. If there are not a lot of scenarios where SQL is required, it is recommended to use ORM, which can help reduce the coupling of your program. + +```python +import uuid +from typing import List + +from mysql.connector import connect, MySQLConnection +from mysql.connector.cursor import MySQLCursor + + +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection + + +def create_player(cursor: MySQLCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: MySQLCursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: MySQLCursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((str(uuid.uuid4()), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: MySQLCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: MySQLCursor) -> int: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player 
WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: MySQLConnection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +The driver has a lower level of encapsulation than ORM, so there are a lot of SQL statements in the program. Unlike ORM, there is no data object in drivers, so the `Player` queried by the driver is represented as a tuple. + +For more information about how to use MySQL Connector/Python, refer to [MySQL Connector/Python documentation](https://dev.mysql.com/doc/connector-python/en/). + +## Step 3. 
Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Initialize table + +Before running the code, you need to initialize the table manually. If you are using a local TiDB cluster, you can run the following command: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +If you are not using a local cluster, or have not installed a MySQL client, connect to your cluster using your preferred method (such as Navicat, DBeaver, or other GUI tools) and run the SQL statements in the `player_init.sql` file. + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, you need to provide your CA root path and replace `` in the following examples with your CA path. To get the CA root path on your system, refer to [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system).> + +If you are using a TiDB Cloud Serverless Tier cluster, change the `get_connection` function in `mysql_connector_python_example.py`: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `get_connection` as follows: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_ca='', + ssl_verify_identity=True + ) + connection.autocommit = autocommit + return connection +``` + +### Step 3.3 Run the code + +Before running the code, use the following command to install dependencies: + +```bash +pip3 install -r requirement.txt +``` + +If you need to run the script multiple times, follow the [Table initialization](#step-31-initialize-table) section to initialize the table again before each run. 
+ +```bash +python3 mysql_connector_python_example.py +``` + +## Step 4. Expected output + +[MySQL Connector/Python Expected Output](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysql-connector-python) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysqlclient.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysqlclient.md new file mode 100644 index 00000000..2feaf5b4 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-mysqlclient.md @@ -0,0 +1,292 @@ +--- +title: Build a Simple CRUD App with TiDB and mysqlclient +summary: Learn how to build a simple CRUD application with TiDB and mysqlclient. +--- + + + + +# Build a Simple CRUD App with TiDB and mysqlclient + +[mysqlclient](https://pypi.org/project/mysqlclient/) is a popular open-source driver for Python. + +This document describes how to use TiDB and mysqlclient to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Python 3.10 or a later Python version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB cluster using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. 
Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +The following uses mysqlclient 2.1.1 as an example. Drivers for Python are more convenient to use than other languages, but they do not shield the underlying implementation and require manual management of transactions. If there are not a lot of scenarios where SQL is required, it is recommended to use ORM, which can help reduce the coupling of your program. + +```python +import uuid +from typing import List + +import MySQLdb +from MySQLdb import Connection +from MySQLdb.cursors import Cursor + +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) + + +def create_player(cursor: Cursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: Cursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: Cursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: Cursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: Cursor) -> int: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + 
cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as conn: + with conn.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +The driver has a lower level of encapsulation than ORM, so there are a lot of SQL statements in the program. Unlike ORM, there is no data object in drivers, so the `Player` queried by the driver is represented as a tuple. + +For more information about how to use mysqlclient, refer to [mysqlclient documentation](https://mysqlclient.readthedocs.io/). + +## Step 3. Run the code + +The following content introduces how to run the code step by step. 
+ +### Step 3.1 Initialize table + +Before running the code, you need to initialize the table manually. If you are using a local TiDB cluster, you can run the following command: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +If you are not using a local cluster, or have not installed a MySQL client, connect to your cluster using your preferred method (such as Navicat, DBeaver, or other GUI tools) and run the SQL statements in the `player_init.sql` file. + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, you need to provide your CA root path and replace `` in the following examples with your CA path. To get the CA root path on your system, refer to [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system). + +If you are using a TiDB Cloud Serverless Tier cluster, change the `get_connection` function in `mysqlclient_example.py`: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `get_connection` as follows: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_mode="VERIFY_IDENTITY", + ssl={ + "ca": "" + } + ) +``` + +### Step 3.3 Run the code + +Before running the code, use the following command to install dependencies: + +```bash +pip3 install -r requirement.txt +``` + +If you need to run the script multiple times, follow the [Table initialization](#step-31-initialize-table) section to initialize the table again before each run. + +```bash +python3 mysqlclient_example.py +``` + +## Step 4. 
Expected output + +[mysqlclient Expected Output](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysqlclient) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-peewee.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-peewee.md new file mode 100644 index 00000000..9c2d78d1 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-peewee.md @@ -0,0 +1,255 @@ +--- +title: Build a Simple CRUD App with TiDB and peewee +summary: Learn how to build a simple CRUD application with TiDB and peewee. +--- + + + + +# Build a Simple CRUD App with TiDB and peewee + +[peewee](http://docs.peewee-orm.com/en/latest/) is a popular open-source ORM library for Python. + +This document describes how to use TiDB and peewee to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Python 3.10 or a later Python version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB cluster using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +The following uses peewee 3.15.4 as an example. 
+ +```python +import os +import uuid +from typing import List + +from peewee import * + +from playhouse.db_url import connect + +db = connect('mysql://root:@127.0.0.1:4000/test') + + +class Player(Model): + id = CharField(max_length=36, primary_key=True) + coins = IntegerField() + goods = IntegerField() + + class Meta: + database = db + table_name = "player" + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + # create a player, who has a coin and a goods. + Player.create(id="test", coins=1, goods=1) + + # get this player, and print it. + test_player = Player.select().where(Player.id == "test").get() + print(f'id:{test_player.id}, coins:{test_player.coins}, goods:{test_player.goods}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + Player.bulk_create(player_list, 114) + + # print the number of players + count = Player.select().count() + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = Player.select().limit(3) + for player in three_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +def trade_check(sell_id: str, buy_id: str, amount: int, price: int) -> bool: + sell_goods = Player.select(Player.goods).where(Player.id == sell_id).get().goods + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + buy_coins = Player.select(Player.coins).where(Player.id == buy_id).get().coins + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + return True + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with db.atomic() as txn: + try: + if trade_check(sell_id, buy_id, amount, price) is False: + txn.rollback() + return + + # deduct the goods of seller, and raise his/her the coins + Player.update(goods=Player.goods - amount, coins=Player.coins + price).where(Player.id == sell_id).execute() + # deduct the coins of buyer, and raise his/her the goods + Player.update(goods=Player.goods + amount, coins=Player.coins - price).where(Player.id == buy_id).execute() + + except Exception as err: + txn.rollback() + print(f'something went wrong: {err}') + else: + txn.commit() + print("trade success") + + +def trade_example() -> None: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + Player.create(id="1", coins=100, goods=0) + Player.create(id="2", coins=114514, goods=20) + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + after_trade_players = Player.select().where(Player.id.in_(["1", "2"])) + for player in after_trade_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +db.connect() + +# recreate the player table +db.drop_tables([Player]) +db.create_tables([Player]) + +simple_example() +trade_example() +``` + +Compared with using drivers directly, peewee provides an abstraction for the specific details of different databases when you create a database connection. In addition, peewee encapsulates some operations such as session management and CRUD of basic objects, which greatly simplifies the code. + +The `Player` class is a mapping of a table to attributes in the application. Each attribute of `Player` corresponds to a field in the `player` table. To provide SQLAlchemy with more information, the attribute is defined as `id = Column(String(36), primary_key=True)` to indicate the field type and its additional attributes. For example, `id = Column(String(36), primary_key=True)` indicates that the `id` attribute is `String` type, the corresponding field in database is `VARCHAR` type, the length is `36`, and it is a primary key. + +For more information about how to use peewee, refer to [peewee documentation](http://docs.peewee-orm.com/en/latest/). + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Initialize table + +Before running the code, you need to initialize the table manually. If you are using a local TiDB cluster, you can run the following command: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +If you are not using a local cluster, or have not installed a MySQL client, connect to your cluster using your preferred method (such as Navicat, DBeaver, or other GUI tools) and run the SQL statements in the `player_init.sql` file. + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, you need to provide your CA root path and replace `` in the following examples with your CA path. To get the CA root path on your system, refer to [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system). + +If you are using a TiDB Cloud Serverless Tier cluster, modify the parameters of the `connect` function in `peewee_example.py`: + +```python +db = connect('mysql://root:@127.0.0.1:4000/test') +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `connect` as follows: + +- When peewee uses PyMySQL as the driver: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_verify_cert=True, ssl_ca="") + ``` + +- When peewee uses mysqlclient as the driver: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_mode="VERIFY_IDENTITY", ssl={"ca": ""}) + ``` + +Because peewee will pass parameters to the driver, you need to pay attention to the usage type of the driver when using peewee. + +### Step 3.3 Run the code + +Before running the code, use the following command to install dependencies: + +```bash +pip3 install -r requirement.txt +``` + +If you need to run the script multiple times, follow the [Table initialization](#step-31-initialize-table) section to initialize the table again before each run. 
+ +```bash +python3 peewee_example.py +``` + +## Step 4. Expected output + +[peewee Expected Output](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#peewee) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-pymysql.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-pymysql.md new file mode 100644 index 00000000..1bc9e39c --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-pymysql.md @@ -0,0 +1,287 @@ +--- +title: Build a Simple CRUD App with TiDB and PyMySQL +summary: Learn how to build a simple CRUD application with TiDB and PyMySQL. +--- + + + + +# Build a Simple CRUD App with TiDB and PyMySQL + +[PyMySQL](https://pypi.org/project/PyMySQL/) is a popular open-source driver for Python. + +This document describes how to use TiDB and PyMySQL to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Python 3.10 or a later Python version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB cluster using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +The following uses PyMySQL 1.0.2 as an example. 
Drivers for Python are more convenient to use than other languages, but they do not shield the underlying implementation and require manual management of transactions. If there are not a lot of scenarios where SQL is required, it is recommended to use ORM, which can help reduce the coupling of your program. + +```python +import uuid +from typing import List + +import pymysql.cursors +from pymysql import Connection +from pymysql.cursors import DictCursor + + +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) + + +def create_player(cursor: DictCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: DictCursor, player_id: str) -> dict: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: DictCursor, limit: int) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: DictCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: DictCursor) -> int: + cursor.execute("SELECT count(*) as count FROM player") + return cursor.fetchone()['count'] + + +def trade_check(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + seller = cursor.fetchone() + if seller['goods'] < amount: + 
print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buyer = cursor.fetchone() + if buyer['coins'] < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(test_player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(player) + + +def trade_example() -> None: + with get_connection(autocommit=False) as connection: + with connection.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + connection.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(connection, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(connection, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with connection.cursor() as cur: + print(get_player(cur, "1")) + print(get_player(cur, "2")) + + +simple_example() +trade_example() +``` + +The driver has a lower level of encapsulation than ORM, so there are a lot of SQL statements in the program. Unlike ORM, there is no data object in drivers, so the `Player` queried by the driver is represented as a dictionary. + +For more information about how to use PyMySQL, refer to [PyMySQL documentation](https://pymysql.readthedocs.io/en/latest/). + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Initialize table + +Before running the code, you need to initialize the table manually. If you are using a local TiDB cluster, you can run the following command: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +If you are not using a local cluster, or have not installed a MySQL client, connect to your cluster using your preferred method (such as Navicat, DBeaver, or other GUI tools) and run the SQL statements in the `player_init.sql` file. + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, you need to provide your CA root path and replace `` in the following examples with your CA path. To get the CA root path on your system, refer to [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system). + +If you are using a TiDB Cloud Serverless Tier cluster, change the `get_connection` function in `pymysql_example.py`: + +```python +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `get_connection` as follows: + +```python +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='xxx.tidbcloud.com', + port=4000, + user='2aEp24QWEDLqRFs.root', + password='123546', + database='test', + cursorclass=DictCursor, + autocommit=autocommit, + ssl_ca='', + ssl_verify_cert=True, + ssl_verify_identity=True) +``` + +### Step 3.3 Run the code + +Before running the code, use the following command to install dependencies: + +```bash +pip3 install -r requirement.txt +``` + +If you need to run the script multiple times, follow the [Table initialization](#step-31-initialize-table) section to initialize the table again before each run. 
+ +```bash +python3 pymysql_example.py +``` + +## Step 4. Expected output + +[PyMySQL Expected Output](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#PyMySQL) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-sqlalchemy.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-sqlalchemy.md new file mode 100644 index 00000000..39a3f049 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-sample-application-python-sqlalchemy.md @@ -0,0 +1,249 @@ +--- +title: Build a Simple CRUD App with TiDB and SQLAlchemy +summary: Learn how to build a simple CRUD application with TiDB and SQLAlchemy. +aliases: ['/tidb/v6.5/dev-guide-outdated-for-sqlalchemy'] +--- + + + + +# Build a Simple CRUD App with TiDB and SQLAlchemy + +[SQLAlchemy](https://www.sqlalchemy.org/) is a popular open-source ORM library for Python. + +This document describes how to use TiDB and SQLAlchemy to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Python 3.10 or a later Python version. + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB cluster using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. 
Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +The following uses SQLAlchemy 1.44 as an example. + +```python +import uuid +from typing import List + +from sqlalchemy import create_engine, String, Column, Integer, select, func +from sqlalchemy.orm import declarative_base, sessionmaker + +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +Base = declarative_base() +Base.metadata.create_all(engine) +Session = sessionmaker(bind=engine) + + +class Player(Base): + __tablename__ = "player" + + id = Column(String(36), primary_key=True) + coins = Column(Integer) + goods = Column(Integer) + + def __repr__(self): + return f'Player(id={self.id!r}, coins={self.coins!r}, goods={self.goods!r})' + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + with Session() as session: + # create a player, who has a coin and a goods. + session.add(Player(id="test", coins=1, goods=1)) + + # get this player, and print it. + get_test_stmt = select(Player).where(Player.id == "test") + for player in session.scalars(get_test_stmt): + print(player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + session.bulk_save_objects(player_list[idx:idx + 114]) + + # print the number of players + count = session.query(func.count(Player.id)).scalar() + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = session.query(Player).limit(3).all() + for player in three_players: + print(player) + + session.commit() + + +def trade_check(session: Session, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + # sell player goods check + sell_player = session.query(Player.goods).filter(Player.id == sell_id).with_for_update().one() + if sell_player.goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + buy_player = session.query(Player.coins).filter(Player.id == buy_id).with_for_update().one() + if buy_player.coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with Session() as session: + if trade_check(session, sell_id, buy_id, amount, price) is False: + return + + # deduct the goods of seller, and raise his/her the coins + session.query(Player).filter(Player.id == sell_id). \ + update({'goods': Player.goods - amount, 'coins': Player.coins + price}) + # deduct the coins of buyer, and raise his/her the goods + session.query(Player).filter(Player.id == buy_id). \ + update({'goods': Player.goods + amount, 'coins': Player.coins - price}) + + session.commit() + print("trade success") + + +def trade_example() -> None: + with Session() as session: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + session.add(Player(id="1", coins=100, goods=0)) + session.add(Player(id="2", coins=114514, goods=20)) + session.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + with Session() as session: + traders = session.query(Player).filter(Player.id.in_(("1", "2"))).all() + for player in traders: + print(player) + session.commit() + + +simple_example() +trade_example() +``` + +Compared with using drivers directly, SQLAlchemy provides an abstraction for the specific details of different databases when you create a database connection. In addition, SQLAlchemy encapsulates some operations such as session management and CRUD of basic objects, which greatly simplifies the code. + +The `Player` class is a mapping of a table to attributes in the application. Each attribute of `Player` corresponds to a field in the `player` table. To provide SQLAlchemy with more information, the attribute is defined as `id = Column(String(36), primary_key=True)` to indicate the field type and its additional attributes. For example, `id = Column(String(36), primary_key=True)` indicates that the `id` attribute is `String` type, the corresponding field in database is `VARCHAR` type, the length is `36`, and it is a primary key. + +For more information about how to use SQLAlchemy, refer to [SQLAlchemy documentation](https://www.sqlalchemy.org/). + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Initialize table + +Before running the code, you need to initialize the table manually. If you are using a local TiDB cluster, you can run the following command: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +If you are not using a local cluster, or have not installed a MySQL client, connect to your cluster using your preferred method (such as Navicat, DBeaver, or other GUI tools) and run the SQL statements in the `player_init.sql` file. + +### Step 3.2 Modify parameters for TiDB Cloud + +If you are using a TiDB Cloud Serverless Tier cluster, you need to provide your CA root path and replace `` in the following examples with your CA path. To get the CA root path on your system, refer to [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system). + +If you are using a TiDB Cloud Serverless Tier cluster, modify the parameters of the `create_engine` function in `sqlalchemy_example.py`: + +```python +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the `create_engine` as follows: + +```python +engine = create_engine('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', connect_args={ + "ssl_mode": "VERIFY_IDENTITY", + "ssl": { + "ca": "" + } +}) +``` + +### Step 3.3 Run the code + +Before running the code, use the following command to install dependencies: + +```bash +pip3 install -r requirement.txt +``` + +If you need to run the script multiple times, follow the [Table initialization](#step-31-initialize-table) section to initialize the table again before each run. + +```bash +python3 sqlalchemy_example.py +``` + +## Step 4. 
Expected output + +[SQLAlchemy Expected Output](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#SQLAlchemy) \ No newline at end of file diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-third-party-support.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-third-party-support.md new file mode 100644 index 00000000..ee073c87 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/develop/dev-guide-third-party-support.md @@ -0,0 +1,318 @@ +--- +title: Third-Party Tools Supported by TiDB +summary: Learn about third-party tools supported by TiDB. +--- + +# Third-Party Tools Supported by TiDB + +> **Note:** +> +> This document only lists common [third-party tools](https://en.wikipedia.org/wiki/Third-party_source) supported by TiDB. Some other third-party tools are not listed, not because they are not supported, but because PingCAP is not sure whether they use features that are incompatible with TiDB. + +TiDB is [highly compatible with the MySQL protocol](/mysql-compatibility.md), so most of the MySQL drivers, ORM frameworks, and other tools that adapt to MySQL are compatible with TiDB. This document focuses on these tools and their support levels for TiDB. + +## Support Level + +PingCAP works with the community and provides the following support levels for third-party tools: + +- **_Full_**: Indicates that TiDB is already compatible with most functionalities of the corresponding third-party tool, and maintains compatibility with its newer versions. PingCAP will periodically conduct compatibility tests with the latest version of the tool. +- **_Compatible_**: Indicates that because the corresponding third-party tool is adapted to MySQL and TiDB is highly compatible with the MySQL protocol, so TiDB can use most features of the tool. 
However, PingCAP has not completed a full test on all features of the tool, which might lead to some unexpected behaviors. + +> **Note:** +> +> Unless specified, support for [Application retry and error handling](/develop/dev-guide-transaction-troubleshoot.md#application-retry-and-error-handling) is not included for **Driver** or **ORM frameworks**. + +If you encounter problems when connecting to TiDB using the tools listed in this document, please submit an [issue](https://github.com/pingcap/tidb/issues/new?assignees=&labels=type%2Fquestion&template=general-question.md) on GitHub with details to promote support on this tool. + +## Driver + +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LanguageDriverLatest tested versionSupport levelTiDB adapterTutorial
Clibmysqlclient8.0CompatibleN/AN/A
C#(.Net)MySQL Connector/NET8.0CompatibleN/AN/A
ODBCMySQL Connector/ODBC8.0CompatibleN/AN/A
GoGo-MySQL-Driverv1.6.0FullN/ABuild a Simple CRUD App with TiDB and Go-MySQL-Driver
JavaJDBC8.0Full + + Build a Simple CRUD App with TiDB and JDBC
JavaScriptmysqlv2.18.1CompatibleN/AN/A
PHPmysqlndPHP 5.4+CompatibleN/AN/A
PythonMySQL Connector/Python8.0CompatibleN/ABuild a Simple CRUD App with TiDB and MySQL Connector/Python
mysqlclient2.1.1CompatibleN/ABuild a Simple CRUD App with TiDB and mysqlclient
PyMySQL1.0.2CompatibleN/ABuild a Simple CRUD App with TiDB and PyMySQL
+ +## ORM + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
LanguageORM frameworkLatest tested versionSupport levelTiDB adapterTutorial
Gogormv1.23.5FullN/ABuild a Simple CRUD App with TiDB and GORM
beegov2.0.3FullN/AN/A
upper/dbv4.5.2FullN/AN/A
xormv1.3.1FullN/AN/A
entv0.11.0CompatibleN/AN/A
JavaHibernate6.1.0.FinalFullN/ABuild a Simple CRUD App with TiDB and Hibernate
MyBatisv3.5.10FullN/ABuild a Simple CRUD App with TiDB and Mybatis
Spring Data JPA2.7.2FullN/ABuild a Simple CRUD App with TiDB and Spring Boot
jOOQv3.16.7 (Open Source)FullN/AN/A
RubyActive Recordv7.0FullN/AN/A
JavaScript / TypeScriptsequelizev6.20.1CompatibleN/AN/A
Knex.jsv1.0.7CompatibleN/AN/A
Prisma Client3.15.1CompatibleN/AN/A
TypeORMv0.3.6CompatibleN/AN/A
PHPlaravelv9.1.10Compatiblelaravel-tidbN/A
PythonDjangov4.0.5Compatibledjango-tidbN/A
peeweev3.14.10CompatibleN/ABuild a Simple CRUD App with TiDB and peewee
SQLAlchemyv1.4.37CompatibleN/ABuild a Simple CRUD App with TiDB and SQLAlchemy
+ +## GUI + +| GUI | Latest tested version | Support level | Tutorial | +| - | - | - | - | +| [DBeaver](https://dbeaver.io/) | 22.1.0 | Compatible | N/A | +| [Navicat for MySQL](https://www.navicat.com/) | 16.0.14 | Compatible | N/A | +| [MySQL Workbench](https://www.mysql.com/products/workbench/) | 8.0 | Compatible | N/A | + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
IDEPluginSupport levelTutorial
DataGripN/ACompatibleN/A
IntelliJ IDEAN/ACompatibleN/A
Visual Studio CodeTiDECompatibleN/A
MySQLCompatibleN/A
diff --git a/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/tidb-cloud/tidb-cloud-guide-sample-application-java.md b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/tidb-cloud/tidb-cloud-guide-sample-application-java.md new file mode 100644 index 00000000..8a296b98 --- /dev/null +++ b/test/sync_pr_docs/data/markdown-pages/en/tidb/release-6.5/tidb-cloud/tidb-cloud-guide-sample-application-java.md @@ -0,0 +1,1730 @@ +--- +title: Build a Simple CRUD App with TiDB and Java +summary: Learn how to build a simple CRUD application with TiDB and Java. +--- + + + + +# Build a Simple CRUD App with TiDB and Java + +This document describes how to use TiDB and Java to build a simple CRUD application. + +> **Note:** +> +> It is recommended to use Java 8 or a later Java version. +> +> If you want to use Spring Boot for application development, refer to [Build the TiDB app using Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + +## Step 1. Launch your TiDB cluster + + + +The following introduces how to start a TiDB cluster. + +**Use a TiDB Cloud Serverless Tier cluster** + +For detailed steps, see [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + +**Use a local cluster** + +For detailed steps, see [Deploy a local test cluster](/quick-start-with-tidb.md#deploy-a-local-test-cluster) or [Deploy a TiDB Cluster Using TiUP](/production-deployment-using-tiup.md). + + + + + +See [Create a Serverless Tier cluster](/develop/dev-guide-build-cluster-in-cloud.md#step-1-create-a-serverless-tier-cluster). + + + +## Step 2. Get the code + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + + + +
+ +Compared with [Mybatis](https://mybatis.org/mybatis-3/index.html), the JDBC implementation might be not a best practice, because you need to write error handling logic manually and cannot reuse code easily, which makes your code slightly redundant. + +Mybatis is a popular open-source Java class persistence framework. The following uses [MyBatis Generator](https://mybatis.org/generator/quickstart.html) as a Maven plugin to generate the persistence layer code. + +Change to the `plain-java-mybatis` directory: + +```shell +cd plain-java-mybatis +``` + +The structure of this directory is as follows: + +``` +. +├── Makefile +├── pom.xml +└── src + └── main + ├── java + │   └── com + │   └── pingcap + │   ├── MybatisExample.java + │   ├── dao + │   │   └── PlayerDAO.java + │   └── model + │   ├── Player.java + │   ├── PlayerMapper.java + │   └── PlayerMapperEx.java + └── resources + ├── dbinit.sql + ├── log4j.properties + ├── mapper + │   ├── PlayerMapper.xml + │   └── PlayerMapperEx.xml + ├── mybatis-config.xml + └── mybatis-generator.xml +``` + +The automatically generated files are: + +- `src/main/java/com/pingcap/model/Player.java`: The `Player` entity class. +- `src/main/java/com/pingcap/model/PlayerMapper.java`: The interface of `PlayerMapper`. +- `src/main/resources/mapper/PlayerMapper.xml`: The XML mapping of `Player`. Mybatis uses this configuration to automatically generate the implementation class of the `PlayerMapper` interface. + +The strategy for generating these files is written in `mybatis-generator.xml`, which is the configuration file for [Mybatis Generator](https://mybatis.org/generator/quickstart.html). There are comments in the following configuration file to describe how to use it. + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +`mybatis-generator.xml` is included in `pom.xml` as the configuration of `mybatis-generator-maven-plugin`. 
+ +```xml + + org.mybatis.generator + mybatis-generator-maven-plugin + 1.4.1 + + src/main/resources/mybatis-generator.xml + true + true + + + + + + mysql + mysql-connector-java + 5.1.49 + + + +``` + +Once included in the Maven plugin, you can delete the old generated files and make new ones using `mvn mybatis-generate`. Or you can use `make gen` to delete the old file and generate a new one at the same time. + +> **Note:** +> +> The property `configuration.overwrite` in `mybatis-generator.xml` only ensures that the generated Java code files are overwritten. But the XML mapping files are still written as appended. Therefore, it is recommended to delete the old file before Mybaits Generator generating a new one. + +`Player.java` is a data entity class file generated using Mybatis Generator, which is a mapping of database tables in the application. Each property of the `Player` class corresponds to a field in the `player` table. + +```java +package com.pingcap.model; + +public class Player { + private String id; + + private Integer coins; + + private Integer goods; + + public Player(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public Player() { + super(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +`PlayerMapper.java` is a mapping interface file generated using Mybatis Generator. This file only defines the interface, and the implementation classes of interface are automatically generated using XML or annotations. 
+ +```java +package com.pingcap.model; + +import com.pingcap.model.Player; + +public interface PlayerMapper { + int deleteByPrimaryKey(String id); + + int insert(Player row); + + int insertSelective(Player row); + + Player selectByPrimaryKey(String id); + + int updateByPrimaryKeySelective(Player row); + + int updateByPrimaryKey(Player row); +} +``` + +`PlayerMapper.xml` is a mapping XML file generated using Mybatis Generator. Mybatis uses this to automatically generate the implementation class of the `PlayerMapper` interface. + +```xml + + + + + + + + + + + + id, coins, goods + + + + delete from player + where id = #{id,jdbcType=VARCHAR} + + + insert into player (id, coins, goods + ) + values (#{id,jdbcType=VARCHAR}, #{coins,jdbcType=INTEGER}, #{goods,jdbcType=INTEGER} + ) + + + insert into player + + + id, + + + coins, + + + goods, + + + + + #{id,jdbcType=VARCHAR}, + + + #{coins,jdbcType=INTEGER}, + + + #{goods,jdbcType=INTEGER}, + + + + + update player + + + coins = #{coins,jdbcType=INTEGER}, + + + goods = #{goods,jdbcType=INTEGER}, + + + where id = #{id,jdbcType=VARCHAR} + + + update player + set coins = #{coins,jdbcType=INTEGER}, + goods = #{goods,jdbcType=INTEGER} + where id = #{id,jdbcType=VARCHAR} + + +``` + +Since Mybatis Generator needs to generate the source code from the table definition, the table needs to be created first. To create the table, you can use `dbinit.sql`. + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +Split the interface `PlayerMapperEx` additionally to extend from `PlayerMapper` and write a matching `PlayerMapperEx.xml` file. Avoid changing `PlayerMapper.java` and `PlayerMapper.xml` directly. This is to avoid overwrite by Mybatis Generator. 
+ +Define the added interface in `PlayerMapperEx.java`: + +```java +package com.pingcap.model; + +import java.util.List; + +public interface PlayerMapperEx extends PlayerMapper { + Player selectByPrimaryKeyWithLock(String id); + + List selectByLimit(Integer limit); + + Integer count(); +} +``` + +Define the mapping rules in `PlayerMapperEx.xml`: + +```xml + + + + + + + + + + + + id, coins, goods + + + + + + + + + +``` + +`PlayerDAO.java` is a class used to manage data, in which `DAO` means [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object). The class defines a set of data manipulation methods for writing data. In it, Mybatis encapsulates a large number of operations such as object mapping and CRUD of basic objects, which greatly simplifies the code. + +```java +package com.pingcap.dao; + +import com.pingcap.model.Player; +import com.pingcap.model.PlayerMapperEx; +import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; + +import java.util.List; +import java.util.function.Function; + +public class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic, so we don't have to duplicate it in + // various places. 
+ public Object runTransaction(SqlSessionFactory sessionFactory, Function fn) { + Object resultObject = null; + SqlSession session = null; + + try { + // open a session with autoCommit is false + session = sessionFactory.openSession(false); + + // get player mapper + PlayerMapperEx playerMapperEx = session.getMapper(PlayerMapperEx.class); + + resultObject = fn.apply(playerMapperEx); + session.commit(); + System.out.println("APP: COMMIT;"); + } catch (Exception e) { + if (e instanceof NotEnoughException) { + System.out.printf("APP: ROLLBACK BY LOGIC; \n%s\n", e.getMessage()); + } else { + System.out.printf("APP: ROLLBACK BY ERROR; \n%s\n", e.getMessage()); + } + + if (session != null) { + session.rollback(); + } + } finally { + if (session != null) { + session.close(); + } + } + + return resultObject; + } + + public Function createPlayers(List players) { + return playerMapperEx -> { + Integer addedPlayerAmount = 0; + for (Player player: players) { + playerMapperEx.insert(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) { + return playerMapperEx -> { + Player sellPlayer = playerMapperEx.selectByPrimaryKeyWithLock(sellId); + Player buyPlayer = playerMapperEx.selectByPrimaryKeyWithLock(buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + int affectRows = 0; + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + affectRows += playerMapperEx.updateByPrimaryKey(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + affectRows += 
playerMapperEx.updateByPrimaryKey(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return affectRows; + }; + } + + public Function getPlayerByID(String id) { + return playerMapperEx -> playerMapperEx.selectByPrimaryKey(id); + } + + public Function printPlayers(Integer limit) { + return playerMapperEx -> { + List players = playerMapperEx.selectByLimit(limit); + + for (Player player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() { + return PlayerMapperEx::count; + } +} +``` + +`MybatisExample` is the main class of the `plain-java-mybatis` sample application. It defines the entry functions: + +```java +package com.pingcap; + +import com.pingcap.dao.PlayerDAO; +import com.pingcap.model.Player; +import org.apache.ibatis.io.Resources; +import org.apache.ibatis.session.SqlSessionFactory; +import org.apache.ibatis.session.SqlSessionFactoryBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; + +public class MybatisExample { + public static void main( String[] args ) throws IOException { + // 1. Create a SqlSessionFactory based on our mybatis-config.xml configuration + // file, which defines how to connect to the database. + InputStream inputStream = Resources.getResourceAsStream("mybatis-config.xml"); + SqlSessionFactory sessionFactory = new SqlSessionFactoryBuilder().build(inputStream); + + // 2. And then, create DAO to manager your data + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(sessionFactory, playerDAO.createPlayers( + Collections.singletonList(new Player("test", 1, 1)))); + + // Get a player. 
+ Player testPlayer = (Player)playerDAO.runTransaction(sessionFactory, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Integer count = (Integer)playerDAO.runTransaction(sessionFactory, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(sessionFactory, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + Player player1 = new Player("1", 100, 0); + Player player2 = new Player("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + + + +
+ +Compared with Hibernate, the JDBC implementation might be not a best practice, because you need to write error handling logic manually and cannot reuse code easily, which makes your code slightly redundant. + +Hibernate is a popular open-source Java ORM, and it supports TiDB dialect starting from `v6.0.0.Beta2`, which fits TiDB features well. The following instructions take `v6.0.0.Beta2` as an example. + +Change to the `plain-java-hibernate` directory: + +```shell +cd plain-java-hibernate +``` + +The structure of this directory is as follows: + +``` +. +├── Makefile +├── plain-java-hibernate.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── HibernateExample.java + └── resources + └── hibernate.cfg.xml +``` + +`hibernate.cfg.xml` is the Hibernate configuration file: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +`HibernateExample.java` is the main body of the `plain-java-hibernate`. Compared with JDBC, when using Hibernate, you only need to write the path of the configuration file, because Hibernate avoids differences in database creation between different databases. + +`PlayerDAO` is a class used to manage data, in which `DAO` means [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object). The class defines a set of data manipulation methods for writing data. Compared with JDBC, Hibernate encapsulates a large number of operations such as object mapping and CRUD of basic objects, which greatly simplifies the code. + +`PlayerBean` is a data entity class that is a mapping for tables. Each property of a `PlayerBean` corresponds to a field in the `player` table. Compared with JDBC, `PlayerBean` in Hibernate adds annotations to indicate mapping relationships for more information. 
+ +```java +package com.pingcap; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; +import org.hibernate.JDBCException; +import org.hibernate.Session; +import org.hibernate.SessionFactory; +import org.hibernate.Transaction; +import org.hibernate.cfg.Configuration; +import org.hibernate.query.NativeQuery; +import org.hibernate.query.Query; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +@Entity +@Table(name = "player_hibernate") +class PlayerBean { + @Id + private String id; + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } +} + +/** + * Main class for the basic Hibernate example. + **/ +public class HibernateExample +{ + public static class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we don't have to duplicate it in + // various places. 
+ public Object runTransaction(Session session, Function fn) { + Object resultObject = null; + + Transaction txn = session.beginTransaction(); + try { + resultObject = fn.apply(session); + txn.commit(); + System.out.println("APP: COMMIT;"); + } catch (JDBCException e) { + System.out.println("APP: ROLLBACK BY JDBC ERROR;"); + txn.rollback(); + } catch (NotEnoughException e) { + System.out.printf("APP: ROLLBACK BY LOGIC; %s", e.getMessage()); + txn.rollback(); + } + return resultObject; + } + + public Function createPlayers(List players) throws JDBCException { + return session -> { + Integer addedPlayerAmount = 0; + for (PlayerBean player: players) { + session.persist(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) throws JDBCException { + return session -> { + PlayerBean sellPlayer = session.get(PlayerBean.class, sellId); + PlayerBean buyPlayer = session.get(PlayerBean.class, buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + session.persist(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + session.persist(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return 0; + }; + } + + public Function getPlayerByID(String id) throws JDBCException { + return session -> session.get(PlayerBean.class, id); + } + + public Function printPlayers(Integer limit) throws JDBCException { + return session -> { + 
NativeQuery limitQuery = session.createNativeQuery("SELECT * FROM player_hibernate LIMIT :limit", PlayerBean.class); + limitQuery.setParameter("limit", limit); + List players = limitQuery.getResultList(); + + for (PlayerBean player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() throws JDBCException { + return session -> { + Query countQuery = session.createQuery("SELECT count(player_hibernate) FROM PlayerBean player_hibernate", Long.class); + return countQuery.getSingleResult(); + }; + } + } + + public static void main(String[] args) { + // 1. Create a SessionFactory based on our hibernate.cfg.xml configuration + // file, which defines how to connect to the database. + SessionFactory sessionFactory + = new Configuration() + .configure("hibernate.cfg.xml") + .addAnnotatedClass(PlayerBean.class) + .buildSessionFactory(); + + try (Session session = sessionFactory.openSession()) { + // 2. And then, create DAO to manager your data. + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple example. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(session, playerDAO.createPlayers(Collections.singletonList( + new PlayerBean("test", 1, 1)))); + + // Get a player. + PlayerBean testPlayer = (PlayerBean)playerDAO.runTransaction(session, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Long count = (Long)playerDAO.runTransaction(session, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(session, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. 
+ PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 can't afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 have to reduce his incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } finally { + sessionFactory.close(); + } + } +} +``` + +
+ +
+ +Change to the `plain-java-jdbc` directory: + +```shell +cd plain-java-jdbc +``` + +The structure of this directory is as follows: + +``` +. +├── Makefile +├── plain-java-jdbc.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── JDBCExample.java + └── resources + └── dbinit.sql +``` + +You can find initialization statements for the table creation in `dbinit.sql`: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`JDBCExample.java` is the main body of the `plain-java-jdbc`. TiDB is highly compatible with the MySQL protocol, so you need to initialize a MySQL source instance `MysqlDataSource` to connect to TiDB. Then, you can initialize `PlayerDAO` for object management and use it to read, edit, add, and delete data. + +`PlayerDAO` is a class used to manage data, in which `DAO` means [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object). The class defines a set of data manipulation methods to provide the ability to write data. + +`PlayerBean` is a data entity class that is a mapping for tables. Each property of a `PlayerBean` corresponds to a field in the `player` table. + +```java +package com.pingcap; + +import com.mysql.cj.jdbc.MysqlDataSource; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.*; + +/** + * Main class for the basic JDBC example. 
+ **/ +public class JDBCExample +{ + public static class PlayerBean { + private String id; + private Integer coins; + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } + } + + /** + * Data access object used by 'ExampleDataSource'. + * Example for CURD and bulk insert. + */ + public static class PlayerDAO { + private final MysqlDataSource ds; + private final Random rand = new Random(); + + PlayerDAO(MysqlDataSource ds) { + this.ds = ds; + } + + /** + * Create players by passing in a List of PlayerBean. 
+ * + * @param players Will create players list + * @return The number of create accounts + */ + public int createPlayers(List players){ + int rows = 0; + + Connection connection = null; + PreparedStatement preparedStatement = null; + try { + connection = ds.getConnection(); + preparedStatement = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)"); + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + + return -1; + } + + try { + for (PlayerBean player : players) { + preparedStatement.setString(1, player.getId()); + preparedStatement.setInt(2, player.getCoins()); + preparedStatement.setInt(3, player.getGoods()); + + preparedStatement.execute(); + rows += preparedStatement.getUpdateCount(); + } + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + } finally { + try { + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + System.out.printf("\n[createPlayers]:\n '%s'\n", preparedStatement); + return rows; + } + + /** + * Buy goods and transfer funds between one player and another in one transaction. + * @param sellId Sell player id. + * @param buyId Buy player id. + * @param amount Goods amount, if sell player has not enough goods, the trade will break. + * @param price Price should pay, if buy player has not enough coins, the trade will break. + * + * @return The number of effected players. 
+ */ + public int buyGoods(String sellId, String buyId, Integer amount, Integer price) { + int effectPlayers = 0; + + Connection connection = null; + try { + connection = ds.getConnection(); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + return effectPlayers; + } + + try { + connection.setAutoCommit(false); + + PreparedStatement playerQuery = connection.prepareStatement("SELECT * FROM player WHERE id=? OR id=? FOR UPDATE"); + playerQuery.setString(1, sellId); + playerQuery.setString(2, buyId); + playerQuery.execute(); + + PlayerBean sellPlayer = null; + PlayerBean buyPlayer = null; + + ResultSet playerQueryResultSet = playerQuery.getResultSet(); + while (playerQueryResultSet.next()) { + PlayerBean player = new PlayerBean( + playerQueryResultSet.getString("id"), + playerQueryResultSet.getInt("coins"), + playerQueryResultSet.getInt("goods") + ); + + System.out.println("\n[buyGoods]:\n 'check goods and coins enough'"); + System.out.println(player); + + if (sellId.equals(player.getId())) { + sellPlayer = player; + } else { + buyPlayer = player; + } + } + + if (sellPlayer == null || buyPlayer == null) { + throw new SQLException("player not exist."); + } + + if (sellPlayer.getGoods().compareTo(amount) < 0) { + throw new SQLException(String.format("sell player %s goods not enough.", sellId)); + } + + if (buyPlayer.getCoins().compareTo(price) < 0) { + throw new SQLException(String.format("buy player %s coins not enough.", buyId)); + } + + PreparedStatement transfer = connection.prepareStatement("UPDATE player set goods = goods + ?, coins = coins + ? 
WHERE id=?"); + transfer.setInt(1, -amount); + transfer.setInt(2, price); + transfer.setString(3, sellId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + transfer.setInt(1, amount); + transfer.setInt(2, -price); + transfer.setString(3, buyId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + connection.commit(); + + System.out.println("\n[buyGoods]:\n 'trade success'"); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + + try { + System.out.println("[buyGoods] Rollback"); + + connection.rollback(); + } catch (SQLException ex) { + // do nothing + } + } finally { + try { + connection.close(); + } catch (SQLException e) { + // do nothing + } + } + + return effectPlayers; + } + + /** + * Get the player info by id. + * + * @param id Player id. + * @return The player of this id. + */ + public PlayerBean getPlayer(String id) { + PlayerBean player = null; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player WHERE id = ?"); + preparedStatement.setString(1, id); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.printf("No players in the table with id %s", id); + } else { + player = new PlayerBean(res.getString("id"), res.getInt("coins"), res.getInt("goods")); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.getPlayer ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return player; + } + + /** + * Insert randomized account data (id, coins, goods) using the JDBC fast path for + * bulk inserts. The fastest way to get data into TiDB is using the + * TiDB Lightning(https://docs.pingcap.com/tidb/stable/tidb-lightning-overview). 
+ * However, if you must bulk insert from the application using INSERT SQL, the best + * option is the method shown here. It will require the following: + * + * Add `rewriteBatchedStatements=true` to your JDBC connection settings. + * Setting rewriteBatchedStatements to true now causes CallableStatements + * with batched arguments to be re-written in the form "CALL (...); CALL (...); ..." + * to send the batch in as few client/server round trips as possible. + * https://dev.mysql.com/doc/relnotes/connector-j/5.1/en/news-5-1-3.html + * + * You can see the `rewriteBatchedStatements` param effect logic at + * implement function: `com.mysql.cj.jdbc.StatementImpl.executeBatchUsingMultiQueries` + * + * @param total Add players amount. + * @param batchSize Bulk insert size for per batch. + * + * @return The number of new accounts inserted. + */ + public int bulkInsertRandomPlayers(Integer total, Integer batchSize) { + int totalNewPlayers = 0; + + try (Connection connection = ds.getConnection()) { + // We're managing the commit lifecycle ourselves, so we can + // control the size of our batch inserts. + connection.setAutoCommit(false); + + // In this example we are adding 500 rows to the database, + // but it could be any number. What's important is that + // the batch size is 128. 
+ try (PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) { + for (int i=0; i<=(total/batchSize);i++) { + for (int j=0; j %s row(s) updated in this batch\n", count.length); + } + connection.commit(); + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + return totalNewPlayers; + } + + + /** + * Print a subset of players from the data store by limit. + * + * @param limit Print max size. + */ + public void printPlayers(Integer limit) { + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player LIMIT ?"); + preparedStatement.setInt(1, limit); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + while (!res.next()) { + PlayerBean player = new PlayerBean(res.getString("id"), + res.getInt("coins"), res.getInt("goods")); + System.out.println("\n[printPlayers]:\n" + player); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.printPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } + + + /** + * Count players from the data store. 
+ * + * @return All players count + */ + public int countPlayers() { + int count = 0; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT count(*) FROM player"); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(res.next()) { + count = res.getInt(1); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.countPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return count; + } + } + + public static void main(String[] args) { + // 1. Configure the example database connection. + + // 1.1 Create a mysql data source instance. + MysqlDataSource mysqlDataSource = new MysqlDataSource(); + + // 1.2 Set server name, port, database name, username and password. + mysqlDataSource.setServerName("localhost"); + mysqlDataSource.setPortNumber(4000); + mysqlDataSource.setDatabaseName("test"); + mysqlDataSource.setUser("root"); + mysqlDataSource.setPassword(""); + + // Or you can use jdbc string instead. + // mysqlDataSource.setURL("jdbc:mysql://{host}:{port}/test?user={user}&password={password}"); + + // 2. And then, create DAO to manager your data. + PlayerDAO dao = new PlayerDAO(mysqlDataSource); + + // 3. Run some simple example. + + // Create a player, has a coin and a goods. + dao.createPlayers(Collections.singletonList(new PlayerBean("test", 1, 1))); + + // Get a player. + PlayerBean testPlayer = dao.getPlayer("test"); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Create players with bulk inserts, insert 1919 players totally, and per batch for 114 players. + int addedCount = dao.bulkInsertRandomPlayers(1919, 114); + System.out.printf("PlayerDAO.bulkInsertRandomPlayers:\n => %d total inserted players\n", addedCount); + + // Count players amount. 
+ int count = dao.countPlayers(); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + dao.printPlayers(3); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + addedCount = dao.createPlayers(Arrays.asList(player1, player2)); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 can't afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + int updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 10, 500); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 have to reduce his incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 2, 100); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +
+ + + +## Step 3. Run the code + +The following content introduces how to run the code step by step. + +### Step 3.1 Table initialization + + + +
+ +When using Mybatis, you need to initialize the database tables manually. If you are using a local cluster, and MySQL client has been installed locally, you can run it directly in the `plain-java-mybatis` directory: + +```shell +make prepare +``` + +Or you can execute the following command: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +``` + +If you are using a non-local cluster or MySQL client has not been installed, connect to your cluster and run the statement in the `src/main/resources/dbinit.sql` file. + +
+ +
+ +No need to initialize tables manually. + +
+ +
+ + + +When using JDBC, you need to initialize the database tables manually. If you are using a local cluster, and MySQL client has been installed locally, you can run it directly in the `plain-java-jdbc` directory: + +```shell +make mysql +``` + +Or you can execute the following command: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + + +When using JDBC, you need to connect to your cluster and run the statement in the `src/main/resources/dbinit.sql` file to initialize the database tables manually. + + + +
+ +
+ +### Step 3.2 Modify parameters for TiDB Cloud + + + +
+ +If you are using a TiDB Cloud Serverless Tier cluster, modify the `dataSource.url`, `dataSource.username`, `dataSource.password` in `mybatis-config.xml`. + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the parameters in `dataSource` node as follows: + +```xml + + + + + ... + + + + + + + + ... + + +``` + +
+ +
+ +If you are using a TiDB Cloud Serverless Tier cluster, modify the `hibernate.connection.url`, `hibernate.connection.username`, `hibernate.connection.password` in `hibernate.cfg.xml`. + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the parameters as follows: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + 2aEp24QWEDLqRFs.root + 123456 + false + + + create-drop + + + true + true + + +``` + +
+ +
+ +If you are using a TiDB Cloud Serverless Tier cluster, modify the parameters of the host, port, user, and password in `JDBCExample.java`: + +```java +mysqlDataSource.setServerName("localhost"); +mysqlDataSource.setPortNumber(4000); +mysqlDataSource.setDatabaseName("test"); +mysqlDataSource.setUser("root"); +mysqlDataSource.setPassword(""); +``` + +Suppose that the password you set is `123456`, and the connection parameters you get from the cluster details page are the following: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +In this case, you can modify the parameters as follows: + +```java +mysqlDataSource.setServerName("xxx.tidbcloud.com"); +mysqlDataSource.setPortNumber(4000); +mysqlDataSource.setDatabaseName("test"); +mysqlDataSource.setUser("2aEp24QWEDLqRFs.root"); +mysqlDataSource.setPassword("123456"); +mysqlDataSource.setSslMode(PropertyDefinitions.SslMode.VERIFY_IDENTITY.name()); +mysqlDataSource.setEnabledTLSProtocols("TLSv1.2,TLSv1.3"); +``` + +
+ +
+ +### Step 3.3 Run + + + +
+ +To run the code, you can run `make prepare`, `make gen`, `make build` and `make run` respectively: + +```shell +make prepare +# this command executes : +# - `mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql` +# - `mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player"` + +make gen +# this command executes : +# - `rm -f src/main/java/com/pingcap/model/Player.java` +# - `rm -f src/main/java/com/pingcap/model/PlayerMapper.java` +# - `rm -f src/main/resources/mapper/PlayerMapper.xml` +# - `mvn mybatis-generator:generate` + +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar` +``` + +Or you can use the native commands: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player" +rm -f src/main/java/com/pingcap/model/Player.java +rm -f src/main/java/com/pingcap/model/PlayerMapper.java +rm -f src/main/resources/mapper/PlayerMapper.xml +mvn mybatis-generator:generate +mvn clean package +java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar +``` + +Or run the `make` command directly, which is a combination of `make prepare`, `make gen`, `make build` and `make run`. + +
+ +
+ +To run the code, you can run `make build` and `make run` respectively: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar` +``` + +Or you can use the native commands: + +```shell +mvn clean package +java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar +``` + +Or run the `make` command directly, which is a combination of `make build` and `make run`. + +
+ +
+ +To run the code, you can run `make build` and `make run` respectively: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar` +``` + +Or you can use the native commands: + +```shell +mvn clean package +java -jar target/plain-java-jdbc-0.0.1-jar-with-dependencies.jar +``` + +Or run the `make` command directly, which is a combination of `make build` and `make run`. + +
+ +
+ +## Step 4. Expected output + + + +
+ +[Mybatis Expected Output](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-mybatis) + +
+ +
+ +[Hibernate Expected Output](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-hibernate) + +
+ +
+ +[JDBC Expected Output](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-jdbc) + +
+ +
diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/TOC.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/TOC.md new file mode 100644 index 00000000..f4091c4f --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/TOC.md @@ -0,0 +1,1188 @@ + + + +- [文档中心](https://docs.pingcap.com/zh) +- 关于 TiDB + - [TiDB 简介](/overview.md) + - [TiDB 7.2 Release Notes](/releases/release-7.2.0.md) + - [功能概览](/basic-features.md) + - [与 MySQL 的兼容性](/mysql-compatibility.md) + - [使用限制](/tidb-limitations.md) + - [荣誉列表](/credits.md) + - [路线图](/tidb-roadmap.md) +- 快速上手 + - [快速上手 TiDB](/quick-start-with-tidb.md) + - [快速上手 HTAP](/quick-start-with-htap.md) + - [SQL 基本操作](/basic-sql-operations.md) + - [深入探索 HTAP](/explore-htap.md) +- 应用开发 + - [概览](/develop/dev-guide-overview.md) + - 快速开始 + - [使用 TiDB Serverless 构建 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md) + - [使用 TiDB 的增删改查 SQL](/develop/dev-guide-tidb-crud-sql.md) + - 示例程序 + - Java + - [JDBC](/develop/dev-guide-sample-application-java-jdbc.md) + - [MyBatis](/develop/dev-guide-sample-application-java-mybatis.md) + - [Hibernate](/develop/dev-guide-sample-application-java-hibernate.md) + - [Spring Boot](/develop/dev-guide-sample-application-java-spring-boot.md) + - Go + - [Go-MySQL-Driver](/develop/dev-guide-sample-application-golang-sql-driver.md) + - [GORM](/develop/dev-guide-sample-application-golang-gorm.md) + - Python + - [mysqlclient](/develop/dev-guide-sample-application-python-mysqlclient.md) + - [MySQL Connector/Python](/develop/dev-guide-sample-application-python-mysql-connector.md) + - [PyMySQL](/develop/dev-guide-sample-application-python-pymysql.md) + - [SQLAlchemy](/develop/dev-guide-sample-application-python-sqlalchemy.md) + - [peewee](/develop/dev-guide-sample-application-python-peewee.md) + - [Django](/develop/dev-guide-sample-application-python-django.md) + - 连接到 TiDB + - [选择驱动或 ORM 框架](/develop/dev-guide-choose-driver-or-orm.md) + - [连接到 
TiDB](/develop/dev-guide-connect-to-tidb.md) + - [连接池与连接参数](/develop/dev-guide-connection-parameters.md) + - 数据库模式设计 + - [概览](/develop/dev-guide-schema-design-overview.md) + - [创建数据库](/develop/dev-guide-create-database.md) + - [创建表](/develop/dev-guide-create-table.md) + - [创建二级索引](/develop/dev-guide-create-secondary-indexes.md) + - 数据写入 + - [插入数据](/develop/dev-guide-insert-data.md) + - [更新数据](/develop/dev-guide-update-data.md) + - [删除数据](/develop/dev-guide-delete-data.md) + - [使用 TTL (Time to Live) 定期删除过期数据](/time-to-live.md) + - [预处理语句](/develop/dev-guide-prepared-statement.md) + - 数据读取 + - [单表读取](/develop/dev-guide-get-data-from-single-table.md) + - [多表连接查询](/develop/dev-guide-join-tables.md) + - [子查询](/develop/dev-guide-use-subqueries.md) + - [查询结果分页](/develop/dev-guide-paginate-results.md) + - [视图](/develop/dev-guide-use-views.md) + - [临时表](/develop/dev-guide-use-temporary-tables.md) + - [公共表表达式](/develop/dev-guide-use-common-table-expression.md) + - 读取副本数据 + - [Follower Read](/develop/dev-guide-use-follower-read.md) + - [Stale Read](/develop/dev-guide-use-stale-read.md) + - [HTAP 查询](/develop/dev-guide-hybrid-oltp-and-olap-queries.md) + - 事务 + - [概览](/develop/dev-guide-transaction-overview.md) + - [乐观事务和悲观事务](/develop/dev-guide-optimistic-and-pessimistic-transaction.md) + - [事务限制](/develop/dev-guide-transaction-restraints.md) + - [事务错误处理](/develop/dev-guide-transaction-troubleshoot.md) + - 优化 SQL 性能 + - [概览](/develop/dev-guide-optimize-sql-overview.md) + - [SQL 性能调优](/develop/dev-guide-optimize-sql.md) + - [性能调优最佳实践](/develop/dev-guide-optimize-sql-best-practices.md) + - [索引的最佳实践](/develop/dev-guide-index-best-practice.md) + - 其他优化 + - [避免隐式类型转换](/develop/dev-guide-implicit-type-conversion.md) + - [唯一序列号生成方案](/develop/dev-guide-unique-serial-number-generation.md) + - 故障诊断 + - [SQL 或事务问题](/develop/dev-guide-troubleshoot-overview.md) + - [结果集不稳定](/develop/dev-guide-unstable-result-set.md) + - [超时](/develop/dev-guide-timeouts-in-tidb.md) + - 引用文档 + - [Bookshop 
示例应用](/develop/dev-guide-bookshop-schema-design.md) + - 规范 + - [命名规范](/develop/dev-guide-object-naming-guidelines.md) + - [SQL 开发规范](/develop/dev-guide-sql-development-specification.md) + - 云原生开发环境 + - [Gitpod](/develop/dev-guide-playground-gitpod.md) + - 第三方工具支持 + - [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md) + - [已知的第三方工具兼容问题](/develop/dev-guide-third-party-tools-compatibility.md) + - [TiDB 与 ProxySQL 集成](/develop/dev-guide-proxysql-integration.md) +- 部署标准集群 + - [软硬件环境需求](/hardware-and-software-requirements.md) + - [环境与系统配置检查](/check-before-deployment.md) + - 规划集群拓扑 + - [最小部署拓扑结构](/minimal-deployment-topology.md) + - [TiFlash 部署拓扑](/tiflash-deployment-topology.md) + - [TiCDC 部署拓扑](/ticdc-deployment-topology.md) + - [TiDB Binlog 部署拓扑](/tidb-binlog-deployment-topology.md) + - [TiSpark 部署拓扑](/tispark-deployment-topology.md) + - [跨机房部署拓扑结构](/geo-distributed-deployment-topology.md) + - [混合部署拓扑结构](/hybrid-deployment-topology.md) + - 安装与启动 + - [使用 TiUP 部署](/production-deployment-using-tiup.md) + - [在 Kubernetes 上部署](/tidb-in-kubernetes.md) + - [验证集群状态](/post-installation-check.md) + - 测试集群性能 + - [用 Sysbench 测试 TiDB](/benchmark/benchmark-tidb-using-sysbench.md) + - [对 TiDB 进行 TPC-C 测试](/benchmark/benchmark-tidb-using-tpcc.md) + - [对 TiDB 进行 CH-benCHmark 测试](/benchmark/benchmark-tidb-using-ch.md) +- 数据迁移 + - [数据迁移概述](/migration-overview.md) + - [数据迁移工具](/migration-tools.md) + - [数据导入最佳实践](/tidb-lightning/data-import-best-practices.md) + - 数据迁移场景 + - [从 Aurora 迁移数据到 TiDB](/migrate-aurora-to-tidb.md) + - [从小数据量 MySQL 迁移数据到 TiDB](/migrate-small-mysql-to-tidb.md) + - [从大数据量 MySQL 迁移数据到 TiDB](/migrate-large-mysql-to-tidb.md) + - [从小数据量分库分表 MySQL 合并迁移数据到 TiDB](/migrate-small-mysql-shards-to-tidb.md) + - [从大数据量分库分表 MySQL 合并迁移数据到 TiDB](/migrate-large-mysql-shards-to-tidb.md) + - [从 CSV 文件迁移数据到 TiDB](/migrate-from-csv-files-to-tidb.md) + - [从 SQL 文件迁移数据到 TiDB](/migrate-from-sql-files-to-tidb.md) + - [从 Parquet 文件迁移数据到 TiDB](/migrate-from-parquet-files-to-tidb.md) + 
- [从 TiDB 集群迁移数据至另一 TiDB 集群](/migrate-from-tidb-to-tidb.md) + - [从 TiDB 集群迁移数据至兼容 MySQL 的数据库](/migrate-from-tidb-to-mysql.md) + - 复杂迁移场景 + - [上游使用 pt/gh-ost 工具的持续同步场景](/migrate-with-pt-ghost.md) + - [下游存在更多列的迁移场景](/migrate-with-more-columns-downstream.md) + - [如何根据类型或 DDL 内容过滤 binlog 事件](/filter-binlog-event.md) + - [如何通过 SQL 表达式过滤 DML binlog 事件](/filter-dml-event.md) +- 数据集成 + - [数据集成概述](/integration-overview.md) + - 数据集成场景 + - [与 Confluent Cloud 和 Snowflake 进行数据集成](/ticdc/integrate-confluent-using-ticdc.md) + - [与 Apache Kafka 和 Apache Flink 进行数据集成](/replicate-data-to-kafka.md) +- 运维操作 + - 升级 TiDB 版本 + - [使用 TiUP 升级](/upgrade-tidb-using-tiup.md) + - [使用 TiDB Operator](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/upgrade-a-tidb-cluster) + - [平滑升级 TiDB](/smooth-upgrade-tidb.md) + - [TiFlash v6.2 升级帮助](/tiflash-620-upgrade-guide.md) + - 扩缩容 + - [使用 TiUP(推荐)](/scale-tidb-using-tiup.md) + - [使用 TiDB Operator](https://docs.pingcap.com/zh/tidb-in-kubernetes/stable/scale-a-tidb-cluster) + - 备份与恢复 + - [备份与恢复概述](/br/backup-and-restore-overview.md) + - 架构设计 + - [架构概述](/br/backup-and-restore-design.md) + - [快照备份与恢复架构](/br/br-snapshot-architecture.md) + - [日志备份与 PITR 架构](/br/br-log-architecture.md) + - 使用 BR 进行备份与恢复 + - [使用概述](/br/br-use-overview.md) + - [快照备份与恢复](/br/br-snapshot-guide.md) + - [日志备份与 PITR](/br/br-pitr-guide.md) + - [实践示例](/br/backup-and-restore-use-cases.md) + - [备份存储](/br/backup-and-restore-storages.md) + - br cli 命令手册 + - [命令概述](/br/use-br-command-line-tool.md) + - [快照备份与恢复命令手册](/br/br-snapshot-manual.md) + - [日志备份与 PITR 命令手册](/br/br-pitr-manual.md) + - 参考指南 + - BR 特性 + - [自动调节](/br/br-auto-tune.md) + - [批量建表](/br/br-batch-create-table.md) + - [断点备份](/br/br-checkpoint-backup.md) + - [断点恢复](/br/br-checkpoint-restore.md) + - [使用 Dumpling 和 TiDB Lightning 备份与恢复](/backup-and-restore-using-dumpling-lightning.md) + - [备份与恢复 RawKV](/br/rawkv-backup-and-restore.md) + - [增量备份与恢复](/br/br-incremental-guide.md) + - 集群容灾 + - 
[容灾方案介绍](/dr-solution-introduction.md) + - [基于主备集群的容灾](/dr-secondary-cluster.md) + - [基于多副本的单集群容灾](/dr-multi-replica.md) + - [基于备份与恢复的容灾](/dr-backup-restore.md) + - [使用资源管控 (Resource Control) 实现资源隔离](/tidb-resource-control.md) + - [修改时区](/configure-time-zone.md) + - [日常巡检](/daily-check.md) + - [TiFlash 常用运维操作](/tiflash/maintain-tiflash.md) + - [使用 TiUP 运维集群](/maintain-tidb-using-tiup.md) + - [在线修改集群配置](/dynamic-config.md) + - [在线有损恢复](/online-unsafe-recovery.md) + - [搭建双集群主从复制](/replicate-between-primary-and-secondary-clusters.md) +- 监控与告警 + - [监控框架概述](/tidb-monitoring-framework.md) + - [监控 API](/tidb-monitoring-api.md) + - [手动部署监控](/deploy-monitoring-services.md) + - [将 Grafana 监控数据导出成快照](/exporting-grafana-snapshots.md) + - [TiDB 集群报警规则与处理方法](/alert-rules.md) + - [TiFlash 报警规则与处理方法](/tiflash/tiflash-alert-rules.md) + - [自定义监控组件的配置](/tiup/customized-montior-in-tiup-environment.md) + - [BR 监控告警](/br/br-monitoring-and-alert.md) +- 故障诊断 + - 故障诊断问题汇总 + - [TiDB 集群问题导图](/tidb-troubleshooting-map.md) + - [TiDB 集群常见问题](/troubleshoot-tidb-cluster.md) + - [TiFlash 常见问题](/tiflash/troubleshoot-tiflash.md) + - 故障场景 + - 慢查询 + - [定位慢查询](/identify-slow-queries.md) + - [分析慢查询](/analyze-slow-queries.md) + - [TiDB OOM 故障排查](/troubleshoot-tidb-oom.md) + - [热点问题处理](/troubleshoot-hot-spot-issues.md) + - [CPU 占用过多导致读写延迟增加](/troubleshoot-cpu-issues.md) + - [写冲突与写性能下降](/troubleshoot-write-conflicts.md) + - [磁盘 I/O 过高](/troubleshoot-high-disk-io.md) + - [锁冲突与 TTL 超时](/troubleshoot-lock-conflicts.md) + - [数据索引不一致报错](/troubleshoot-data-inconsistency-errors.md) + - 故障诊断方法 + - [通过 SQL 诊断获取集群诊断信息](/information-schema/information-schema-sql-diagnostics.md) + - [通过 Statement Summary 排查 SQL 性能问题](/statement-summary-tables.md) + - [使用 Top SQL 定位系统资源消耗过多的查询](/dashboard/top-sql.md) + - [通过日志定位消耗系统资源多的查询](/identify-expensive-queries.md) + - [保存和恢复集群现场信息](/sql-plan-replayer.md) + - [获取支持](/support.md) +- 性能调优 + - 优化手册 + - [优化概述](/performance-tuning-overview.md) + - 
[优化方法](/performance-tuning-methods.md) + - [OLTP 负载性能优化实践](/performance-tuning-practices.md) + - [TiFlash 性能分析方法](/tiflash-performance-tuning-methods.md) + - [TiCDC 性能分析方法](/ticdc-performance-tuning-methods.md) + - [延迟的拆解分析](/latency-breakdown.md) + - 配置调优 + - [操作系统性能参数调优](/tune-operating-system.md) + - [TiDB 内存调优](/configure-memory-usage.md) + - [TiKV 线程调优](/tune-tikv-thread-performance.md) + - [TiKV 内存调优](/tune-tikv-memory-performance.md) + - [TiKV Follower Read](/follower-read.md) + - [Region 性能调优](/tune-region-performance.md) + - [TiFlash 调优](/tiflash/tune-tiflash-performance.md) + - [下推计算结果缓存](/coprocessor-cache.md) + - 垃圾回收 (GC) + - [GC 机制简介](/garbage-collection-overview.md) + - [GC 配置](/garbage-collection-configuration.md) + - SQL 性能调优 + - [SQL 性能调优概览](/sql-tuning-overview.md) + - 理解 TiDB 执行计划 + - [TiDB 执行计划概览](/explain-overview.md) + - [使用 `EXPLAIN` 解读执行计划](/explain-walkthrough.md) + - [MPP 模式查询的执行计划](/explain-mpp.md) + - [索引查询的执行计划](/explain-indexes.md) + - [Join 查询的执行计划](/explain-joins.md) + - [子查询的执行计划](/explain-subqueries.md) + - [聚合查询的执行计划](/explain-aggregation.md) + - [视图查询的执行计划](/explain-views.md) + - [分区查询的执行计划](/explain-partitions.md) + - [开启 IndexMerge 查询的执行计划](/explain-index-merge.md) + - SQL 优化流程 + - [SQL 优化流程概览](/sql-optimization-concepts.md) + - 逻辑优化 + - [逻辑优化概览](/sql-logical-optimization.md) + - [子查询相关的优化](/subquery-optimization.md) + - [列裁剪](/column-pruning.md) + - [关联子查询去关联](/correlated-subquery-optimization.md) + - [Max/Min 消除](/max-min-eliminate.md) + - [谓词下推](/predicate-push-down.md) + - [分区裁剪](/partition-pruning.md) + - [TopN 和 Limit 下推](/topn-limit-push-down.md) + - [Join Reorder](/join-reorder.md) + - [从窗口函数中推导 TopN 或 Limit](/derive-topn-from-window.md) + - 物理优化 + - [物理优化概览](/sql-physical-optimization.md) + - [索引的选择](/choose-index.md) + - [统计信息简介](/statistics.md) + - [错误索引的解决方案](/wrong-index-solution.md) + - [Distinct 优化](/agg-distinct-optimization.md) + - [代价模型](/cost-model.md) + - [Prepare 语句执行计划缓存](/sql-prepared-plan-cache.md) + - 
[非 Prepare 语句执行计划缓存](/sql-non-prepared-plan-cache.md) + - 控制执行计划 + - [控制执行计划概览](/control-execution-plan.md) + - [Optimizer Hints](/optimizer-hints.md) + - [执行计划管理](/sql-plan-management.md) + - [优化规则及表达式下推的黑名单](/blocklist-control-plan.md) + - [Optimizer Fix Controls](/optimizer-fix-controls.md) +- 教程 + - [单区域多 AZ 部署](/multi-data-centers-in-one-city-deployment.md) + - [双区域多 AZ 部署](/three-data-centers-in-two-cities-deployment.md) + - [单区域双 AZ 部署](/two-data-centers-in-one-city-deployment.md) + - 读取历史数据 + - 使用 Stale Read 功能读取历史数据(推荐) + - [Stale Read 使用场景介绍](/stale-read.md) + - [使用 `AS OF TIMESTAMP` 语法读取历史数据](/as-of-timestamp.md) + - [使用系统变量 `tidb_read_staleness` 读取历史数据](/tidb-read-staleness.md) + - [使用系统变量 `tidb_external_ts` 读取历史数据](/tidb-external-ts.md) + - [使用系统变量 `tidb_snapshot` 读取历史数据](/read-historical-data.md) + - 最佳实践 + - [TiDB 最佳实践](/best-practices/tidb-best-practices.md) + - [Java 应用开发最佳实践](/best-practices/java-app-best-practices.md) + - [HAProxy 最佳实践](/best-practices/haproxy-best-practices.md) + - [高并发写入场景最佳实践](/best-practices/high-concurrency-best-practices.md) + - [Grafana 监控最佳实践](/best-practices/grafana-monitor-best-practices.md) + - [PD 调度策略最佳实践](/best-practices/pd-scheduling-best-practices.md) + - [海量 Region 集群调优](/best-practices/massive-regions-best-practices.md) + - [三节点混合部署最佳实践](/best-practices/three-nodes-hybrid-deployment.md) + - [在三数据中心下就近读取数据](/best-practices/three-dc-local-read.md) + - [使用 UUID](/best-practices/uuid.md) + - [只读存储节点最佳实践](/best-practices/readonly-nodes.md) + - [Placement Rules 使用文档](/configure-placement-rules.md) + - [Load Base Split 使用文档](/configure-load-base-split.md) + - [Store Limit 使用文档](/configure-store-limit.md) + - [DDL 执行原理及最佳实践](/ddl-introduction.md) +- TiDB 工具 + - [功能概览](/ecosystem-tool-user-guide.md) + - [使用场景](/ecosystem-tool-user-case.md) + - [工具下载](/download-ecosystem-tools.md) + - TiUP + - [文档地图](/tiup/tiup-documentation-guide.md) + - [概览](/tiup/tiup-overview.md) + - [术语及核心概念](/tiup/tiup-terminology-and-concepts.md) + 
- [TiUP 组件管理](/tiup/tiup-component-management.md) + - [FAQ](/tiup/tiup-faq.md) + - [故障排查](/tiup/tiup-troubleshooting-guide.md) + - TiUP 命令参考手册 + - [命令概览](/tiup/tiup-reference.md) + - TiUP 命令 + - [tiup clean](/tiup/tiup-command-clean.md) + - [tiup completion](/tiup/tiup-command-completion.md) + - [tiup env](/tiup/tiup-command-env.md) + - [tiup help](/tiup/tiup-command-help.md) + - [tiup install](/tiup/tiup-command-install.md) + - [tiup list](/tiup/tiup-command-list.md) + - tiup mirror + - [tiup mirror 概览](/tiup/tiup-command-mirror.md) + - [tiup mirror clone](/tiup/tiup-command-mirror-clone.md) + - [tiup mirror genkey](/tiup/tiup-command-mirror-genkey.md) + - [tiup mirror grant](/tiup/tiup-command-mirror-grant.md) + - [tiup mirror init](/tiup/tiup-command-mirror-init.md) + - [tiup mirror merge](/tiup/tiup-command-mirror-merge.md) + - [tiup mirror modify](/tiup/tiup-command-mirror-modify.md) + - [tiup mirror publish](/tiup/tiup-command-mirror-publish.md) + - [tiup mirror rotate](/tiup/tiup-command-mirror-rotate.md) + - [tiup mirror set](/tiup/tiup-command-mirror-set.md) + - [tiup mirror sign](/tiup/tiup-command-mirror-sign.md) + - [tiup status](/tiup/tiup-command-status.md) + - [tiup telemetry](/tiup/tiup-command-telemetry.md) + - [tiup uninstall](/tiup/tiup-command-uninstall.md) + - [tiup update](/tiup/tiup-command-update.md) + - TiUP Cluster 命令 + - [TiUP Cluster 命令概览](/tiup/tiup-component-cluster.md) + - [tiup cluster audit](/tiup/tiup-component-cluster-audit.md) + - [tiup cluster audit cleanup](/tiup/tiup-component-cluster-audit-cleanup.md) + - [tiup cluster check](/tiup/tiup-component-cluster-check.md) + - [tiup cluster clean](/tiup/tiup-component-cluster-clean.md) + - [tiup cluster deploy](/tiup/tiup-component-cluster-deploy.md) + - [tiup cluster destroy](/tiup/tiup-component-cluster-destroy.md) + - [tiup cluster disable](/tiup/tiup-component-cluster-disable.md) + - [tiup cluster display](/tiup/tiup-component-cluster-display.md) + - [tiup cluster 
edit-config](/tiup/tiup-component-cluster-edit-config.md) + - [tiup cluster enable](/tiup/tiup-component-cluster-enable.md) + - [tiup cluster help](/tiup/tiup-component-cluster-help.md) + - [tiup cluster import](/tiup/tiup-component-cluster-import.md) + - [tiup cluster list](/tiup/tiup-component-cluster-list.md) + - [tiup cluster meta backup](/tiup/tiup-component-cluster-meta-backup.md) + - [tiup cluster meta restore](/tiup/tiup-component-cluster-meta-restore.md) + - [tiup cluster patch](/tiup/tiup-component-cluster-patch.md) + - [tiup cluster prune](/tiup/tiup-component-cluster-prune.md) + - [tiup cluster reload](/tiup/tiup-component-cluster-reload.md) + - [tiup cluster rename](/tiup/tiup-component-cluster-rename.md) + - [tiup cluster replay](/tiup/tiup-component-cluster-replay.md) + - [tiup cluster restart](/tiup/tiup-component-cluster-restart.md) + - [tiup cluster scale-in](/tiup/tiup-component-cluster-scale-in.md) + - [tiup cluster scale-out](/tiup/tiup-component-cluster-scale-out.md) + - [tiup cluster start](/tiup/tiup-component-cluster-start.md) + - [tiup cluster stop](/tiup/tiup-component-cluster-stop.md) + - [tiup cluster template](/tiup/tiup-component-cluster-template.md) + - [tiup cluster upgrade](/tiup/tiup-component-cluster-upgrade.md) + - TiUP DM 命令 + - [TiUP DM 命令概览](/tiup/tiup-component-dm.md) + - [tiup dm audit](/tiup/tiup-component-dm-audit.md) + - [tiup dm deploy](/tiup/tiup-component-dm-deploy.md) + - [tiup dm destroy](/tiup/tiup-component-dm-destroy.md) + - [tiup dm disable](/tiup/tiup-component-dm-disable.md) + - [tiup dm display](/tiup/tiup-component-dm-display.md) + - [tiup dm edit-config](/tiup/tiup-component-dm-edit-config.md) + - [tiup dm enable](/tiup/tiup-component-dm-enable.md) + - [tiup dm help](/tiup/tiup-component-dm-help.md) + - [tiup dm import](/tiup/tiup-component-dm-import.md) + - [tiup dm list](/tiup/tiup-component-dm-list.md) + - [tiup dm patch](/tiup/tiup-component-dm-patch.md) + - [tiup dm 
prune](/tiup/tiup-component-dm-prune.md) + - [tiup dm reload](/tiup/tiup-component-dm-reload.md) + - [tiup dm replay](/tiup/tiup-component-dm-replay.md) + - [tiup dm restart](/tiup/tiup-component-dm-restart.md) + - [tiup dm scale-in](/tiup/tiup-component-dm-scale-in.md) + - [tiup dm scale-out](/tiup/tiup-component-dm-scale-out.md) + - [tiup dm start](/tiup/tiup-component-dm-start.md) + - [tiup dm stop](/tiup/tiup-component-dm-stop.md) + - [tiup dm template](/tiup/tiup-component-dm-template.md) + - [tiup dm upgrade](/tiup/tiup-component-dm-upgrade.md) + - [TiDB 集群拓扑文件配置](/tiup/tiup-cluster-topology-reference.md) + - [DM 集群拓扑文件配置](/tiup/tiup-dm-topology-reference.md) + - [TiUP 镜像参考指南](/tiup/tiup-mirror-reference.md) + - TiUP 组件文档 + - [tiup-playground 运行本地测试集群](/tiup/tiup-playground.md) + - [tiup-cluster 部署运维生产集群](/tiup/tiup-cluster.md) + - [tiup-mirror 定制离线镜像](/tiup/tiup-mirror.md) + - [tiup-bench 进行 TPCC/TPCH 压力测试](/tiup/tiup-bench.md) + - [TiDB Operator](/tidb-operator-overview.md) + - TiDB Data Migration + - [关于 Data Migration](/dm/dm-overview.md) + - [架构简介](/dm/dm-arch.md) + - [快速开始](/dm/quick-start-with-dm.md) + - [最佳实践](/dm/dm-best-practices.md) + - 部署 DM 集群 + - [软硬件要求](/dm/dm-hardware-and-software-requirements.md) + - [使用 TiUP 联网部署(推荐)](/dm/deploy-a-dm-cluster-using-tiup.md) + - [使用 TiUP 离线部署](/dm/deploy-a-dm-cluster-using-tiup-offline.md) + - [使用 Binary 部署](/dm/deploy-a-dm-cluster-using-binary.md) + - [在 Kubernetes 环境中部署](https://docs.pingcap.com/zh/tidb-in-kubernetes/dev/deploy-tidb-dm) + - 入门指南 + - [创建数据源](/dm/quick-start-create-source.md) + - [数据源操作](/dm/dm-manage-source.md) + - [任务配置向导](/dm/dm-task-configuration-guide.md) + - [分库分表合并](/dm/dm-shard-merge.md) + - [表路由](/dm/dm-table-routing.md) + - [黑白名单](/dm/dm-block-allow-table-lists.md) + - [过滤 binlog 事件](/dm/dm-binlog-event-filter.md) + - [通过 SQL 表达式过滤 DML](/dm/feature-expression-filter.md) + - [Online DDL 工具支持](/dm/dm-online-ddl-tool-support.md) + - 迁移任务操作 + - [任务前置检查](/dm/dm-precheck.md) + - 
[创建任务](/dm/dm-create-task.md) + - [查询状态](/dm/dm-query-status.md) + - [暂停任务](/dm/dm-pause-task.md) + - [恢复任务](/dm/dm-resume-task.md) + - [停止任务](/dm/dm-stop-task.md) + - 进阶教程 + - 分库分表合并迁移 + - [概述](/dm/feature-shard-merge.md) + - [悲观模式](/dm/feature-shard-merge-pessimistic.md) + - [乐观模式](/dm/feature-shard-merge-optimistic.md) + - [手动处理 Sharding DDL Lock](/dm/manually-handling-sharding-ddl-locks.md) + - [迁移使用 GH-ost/PT-osc 的数据源](/dm/feature-online-ddl.md) + - [上下游列数量不一致的迁移](/migrate-with-more-columns-downstream.md) + - [增量数据校验](/dm/dm-continuous-data-validation.md) + - 运维管理 + - 集群版本升级 + - [使用 TiUP 运维集群(推荐)](/dm/maintain-dm-using-tiup.md) + - [1.0.x 到 2.0+ 手动升级](/dm/manually-upgrade-dm-1.0-to-2.0.md) + - [在线应用 Hotfix 到 DM 集群](/tiup/tiup-component-dm-patch.md) + - 集群运维工具 + - [使用 WebUI 管理迁移任务](/dm/dm-webui-guide.md) + - [使用 dmctl 管理迁移任务](/dm/dmctl-introduction.md) + - 性能调优 + - [性能数据](/dm/dm-benchmark-v5.4.0.md) + - [配置调优](/dm/dm-tune-configuration.md) + - [如何进行压力测试](/dm/dm-performance-test.md) + - [性能问题及处理方法](/dm/dm-handle-performance-issues.md) + - 数据源管理 + - [变更同步的数据源地址](/dm/usage-scenario-master-slave-switch.md) + - 任务管理 + - [处理出错的 DDL 语句](/dm/handle-failed-ddl-statements.md) + - [管理迁移表的表结构](/dm/dm-manage-schema.md) + - [导出和导入集群的数据源和任务配置](/dm/dm-export-import-config.md) + - [处理告警](/dm/dm-handle-alerts.md) + - [日常巡检](/dm/dm-daily-check.md) + - 参考手册 + - 架构组件 + - [DM-worker 说明](/dm/dm-worker-intro.md) + - [安全模式](/dm/dm-safe-mode.md) + - [Relay Log](/dm/relay-log.md) + - [DDL 特殊处理说明](/dm/dm-ddl-compatible.md) + - 运行机制 + - [DML 同步机制](/dm/dm-dml-replication-logic.md) + - 命令行 + - [DM-master & DM-worker](/dm/dm-command-line-flags.md) + - 配置文件 + - [概述](/dm/dm-config-overview.md) + - [数据源配置](/dm/dm-source-configuration-file.md) + - [迁移任务配置](/dm/task-configuration-file-full.md) + - [DM-master 配置](/dm/dm-master-configuration-file.md) + - [DM-worker 配置](/dm/dm-worker-configuration-file.md) + - [Table Selector](/dm/table-selector.md) + - [OpenAPI](/dm/dm-open-api.md) + - 
[兼容性目录](/dm/dm-compatibility-catalog.md) + - 安全 + - [为 DM 的连接开启加密传输](/dm/dm-enable-tls.md) + - [生成自签名证书](/dm/dm-generate-self-signed-certificates.md) + - 监控告警 + - [监控指标](/dm/monitor-a-dm-cluster.md) + - [告警信息](/dm/dm-alert-rules.md) + - [错误码](/dm/dm-error-handling.md#常见故障处理方法) + - [术语表](/dm/dm-glossary.md) + - 使用示例 + - [使用 DM 迁移数据](/dm/migrate-data-using-dm.md) + - [快速创建迁移任务](/dm/quick-start-create-task.md) + - [分表合并数据迁移最佳实践](/dm/shard-merge-best-practices.md) + - 异常解决 + - [常见问题](/dm/dm-faq.md) + - [错误处理及恢复](/dm/dm-error-handling.md) + - [版本发布历史](/dm/dm-release-notes.md) + - TiDB Lightning + - [概述](/tidb-lightning/tidb-lightning-overview.md) + - [快速上手](/get-started-with-tidb-lightning.md) + - [部署 TiDB Lightning](/tidb-lightning/deploy-tidb-lightning.md) + - [目标数据库要求](/tidb-lightning/tidb-lightning-requirements.md) + - 数据源 + - [文件匹配规则](/tidb-lightning/tidb-lightning-data-source.md) + - [CSV](/tidb-lightning/tidb-lightning-data-source.md#csv) + - [SQL](/tidb-lightning/tidb-lightning-data-source.md#sql) + - [Parquet](/tidb-lightning/tidb-lightning-data-source.md#parquet) + - [自定义文件匹配](/tidb-lightning/tidb-lightning-data-source.md#自定义文件匹配) + - 物理导入模式 + - [概述](/tidb-lightning/tidb-lightning-physical-import-mode.md) + - [必要条件及限制](/tidb-lightning/tidb-lightning-physical-import-mode.md#必要条件及限制) + - [配置及使用](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md) + - [冲突检测](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#冲突数据检测) + - [性能调优](/tidb-lightning/tidb-lightning-physical-import-mode-usage.md#性能调优) + - 逻辑导入模式 + - [概述](/tidb-lightning/tidb-lightning-logical-import-mode.md) + - [必要条件及限制](/tidb-lightning/tidb-lightning-logical-import-mode.md#必要条件) + - [配置及使用](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md) + - [冲突检测](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md#冲突数据检测) + - [性能调优](/tidb-lightning/tidb-lightning-logical-import-mode-usage.md#性能调优) + - [前置检查](/tidb-lightning/tidb-lightning-prechecks.md) + - 
[表库过滤](/table-filter.md) + - [断点续传](/tidb-lightning/tidb-lightning-checkpoints.md) + - [并行导入](/tidb-lightning/tidb-lightning-distributed-import.md) + - [可容忍错误](/tidb-lightning/tidb-lightning-error-resolution.md) + - [故障处理](/tidb-lightning/troubleshoot-tidb-lightning.md) + - 参考手册 + - [完整配置文件](/tidb-lightning/tidb-lightning-configuration.md) + - [命令行参数](/tidb-lightning/tidb-lightning-command-line-full.md) + - [监控告警](/tidb-lightning/monitor-tidb-lightning.md) + - [Web 界面](/tidb-lightning/tidb-lightning-web-interface.md) + - [FAQ](/tidb-lightning/tidb-lightning-faq.md) + - [术语表](/tidb-lightning/tidb-lightning-glossary.md) + - [Dumpling](/dumpling-overview.md) + - TiCDC + - [概述](/ticdc/ticdc-overview.md) + - [安装部署与集群运维](/ticdc/deploy-ticdc.md) + - Changefeed + - [Changefeed 概述](/ticdc/ticdc-changefeed-overview.md) + - 创建 Changefeed + - [同步数据到 MySQL 兼容的数据库](/ticdc/ticdc-sink-to-mysql.md) + - [同步数据到 Kafka](/ticdc/ticdc-sink-to-kafka.md) + - [同步数据到存储服务](/ticdc/ticdc-sink-to-cloud-storage.md) + - [管理 Changefeed](/ticdc/ticdc-manage-changefeed.md) + - [日志过滤器](/ticdc/ticdc-filter.md) + - [双向复制](/ticdc/ticdc-bidirectional-replication.md) + - [单行数据正确性校验](/ticdc/ticdc-integrity-check.md) + - 监控告警 + - [基本监控指标](/ticdc/ticdc-summary-monitor.md) + - [详细监控指标](/ticdc/monitor-ticdc.md) + - [报警规则](/ticdc/ticdc-alert-rules.md) + - 参考指南 + - [架构设计与原理](/ticdc/ticdc-architecture.md) + - [TiCDC Server 配置参数](/ticdc/ticdc-server-config.md) + - [TiCDC Changefeed 配置参数](/ticdc/ticdc-changefeed-config.md) + - 输出数据协议 + - [TiCDC Avro Protocol](/ticdc/ticdc-avro-protocol.md) + - [TiCDC Canal-JSON Protocol](/ticdc/ticdc-canal-json.md) + - [TiCDC Open Protocol](/ticdc/ticdc-open-protocol.md) + - [TiCDC CSV Protocol](/ticdc/ticdc-csv.md) + - [TiCDC Open API v2](/ticdc/ticdc-open-api-v2.md) + - [TiCDC Open API v1](/ticdc/ticdc-open-api.md) + - TiCDC 数据消费 + - [基于 Avro 的 TiCDC 行数据 Checksum 校验](/ticdc/ticdc-avro-checksum-verification.md) + - [Storage sink 消费程序编写指引](/ticdc/ticdc-storage-consumer-dev-guide.md) 
+ - [兼容性](/ticdc/ticdc-compatibility.md) + - [故障处理](/ticdc/troubleshoot-ticdc.md) + - [常见问题解答](/ticdc/ticdc-faq.md) + - [术语表](/ticdc/ticdc-glossary.md) + - TiDB Binlog + - [概述](/tidb-binlog/tidb-binlog-overview.md) + - [快速上手](/tidb-binlog/get-started-with-tidb-binlog.md) + - [部署使用](/tidb-binlog/deploy-tidb-binlog.md) + - [运维管理](/tidb-binlog/maintain-tidb-binlog-cluster.md) + - [配置说明](/tidb-binlog/tidb-binlog-configuration-file.md) + - [Pump](/tidb-binlog/tidb-binlog-configuration-file.md#pump) + - [Drainer](/tidb-binlog/tidb-binlog-configuration-file.md#drainer) + - [版本升级](/tidb-binlog/upgrade-tidb-binlog.md) + - [监控告警](/tidb-binlog/monitor-tidb-binlog-cluster.md) + - [增量恢复](/tidb-binlog/tidb-binlog-reparo.md) + - [binlogctl 工具](/tidb-binlog/binlog-control.md) + - [Kafka 自定义开发](/tidb-binlog/binlog-consumer-client.md) + - [TiDB Binlog Relay Log](/tidb-binlog/tidb-binlog-relay-log.md) + - [集群间双向同步](/tidb-binlog/bidirectional-replication-between-tidb-clusters.md) + - [术语表](/tidb-binlog/tidb-binlog-glossary.md) + - 故障诊断 + - [故障诊断](/tidb-binlog/troubleshoot-tidb-binlog.md) + - [常见错误修复](/tidb-binlog/handle-tidb-binlog-errors.md) + - [FAQ](/tidb-binlog/tidb-binlog-faq.md) + - PingCAP Clinic 诊断服务 + - [概述](/clinic/clinic-introduction.md) + - [快速上手](/clinic/quick-start-with-clinic.md) + - [使用 PingCAP Clinic 诊断集群](/clinic/clinic-user-guide-for-tiup.md) + - [使用 PingCAP Clinic 生成诊断报告](/clinic/clinic-report.md) + - [采集 SQL 查询计划信息](/clinic/clinic-collect-sql-query-plan.md) + - [数据采集说明](/clinic/clinic-data-instruction-for-tiup.md) + - TiSpark + - [TiSpark 用户指南](/tispark-overview.md) + - sync-diff-inspector + - [概述](/sync-diff-inspector/sync-diff-inspector-overview.md) + - [不同库名或表名的数据校验](/sync-diff-inspector/route-diff.md) + - [分库分表场景下的数据校验](/sync-diff-inspector/shard-diff.md) + - [TiDB 主从集群的数据校验](/sync-diff-inspector/upstream-downstream-diff.md) + - [基于 DM 同步场景下的数据校验](/sync-diff-inspector/dm-diff.md) + - TiUniManager + - [概述](/tiunimanager/tiunimanager-overview.md) + - 
[安装和运维](/tiunimanager/tiunimanager-install-and-maintain.md) + - [快速操作](/tiunimanager/tiunimanager-quick-start.md) + - 操作指南 + - [登录与初始化](/tiunimanager/tiunimanager-login-and-initialize.md) + - [管理集群资源](/tiunimanager/tiunimanager-manage-host-resources.md) + - [管理集群](/tiunimanager/tiunimanager-manage-clusters.md) + - [导入与导出数据](/tiunimanager/tiunimanager-import-and-export-data.md) + - [管理任务](/tiunimanager/tiunimanager-manage-tasks.md) + - [管理系统](/tiunimanager/tiunimanager-manage-system.md) + - [FAQ](/tiunimanager/tiunimanager-faq.md) + - 发布版本历史 + - [发布版本汇总](/tiunimanager/tiunimanager-release-notes.md) + - [v1.0.2](/tiunimanager/tiunimanager-release-1.0.2.md) + - [v1.0.1](/tiunimanager/tiunimanager-release-1.0.1.md) + - [v1.0.0](/tiunimanager/tiunimanager-release-1.0.0.md) +- 参考指南 + - 架构 + - [概述](/tidb-architecture.md) + - [存储](/tidb-storage.md) + - [计算](/tidb-computing.md) + - [调度](/tidb-scheduling.md) + - 存储引擎 TiKV + - [TiKV 简介](/tikv-overview.md) + - [RocksDB 简介](/storage-engine/rocksdb-overview.md) + - [Titan 简介](/storage-engine/titan-overview.md) + - [Titan 配置说明](/storage-engine/titan-configuration.md) + - [Partitioned Raft KV](/partitioned-raft-kv.md) + - 存储引擎 TiFlash + - [TiFlash 简介](/tiflash/tiflash-overview.md) + - [构建 TiFlash 副本](/tiflash/create-tiflash-replicas.md) + - [使用 TiDB 读取 TiFlash](/tiflash/use-tidb-to-read-tiflash.md) + - [使用 TiSpark 读取 TiFlash](/tiflash/use-tispark-to-read-tiflash.md) + - [使用 MPP 模式](/tiflash/use-tiflash-mpp-mode.md) + - [TiFlash 存算分离架构与 S3 支持](/tiflash/tiflash-disaggregated-and-s3.md) + - [使用 FastScan 功能](/tiflash/use-fastscan.md) + - [TiFlash 支持的计算下推](/tiflash/tiflash-supported-pushdown-calculations.md) + - [TiFlash 查询结果物化](/tiflash/tiflash-results-materialization.md) + - [TiFlash 延迟物化](/tiflash/tiflash-late-materialization.md) + - [TiFlash 数据落盘](/tiflash/tiflash-spill-disk.md) + - [TiFlash 数据校验](/tiflash/tiflash-data-validation.md) + - [TiFlash 兼容性说明](/tiflash/tiflash-compatibility.md) + - [TiFlash Pipeline Model 
执行模型](/tiflash/tiflash-pipeline-model.md) + - [系统变量](/system-variables.md) + - 配置文件参数 + - [tidb-server](/tidb-configuration-file.md) + - [tikv-server](/tikv-configuration-file.md) + - [tiflash-server](/tiflash/tiflash-configuration.md) + - [pd-server](/pd-configuration-file.md) + - CLI + - [tikv-ctl](/tikv-control.md) + - [pd-ctl](/pd-control.md) + - [tidb-ctl](/tidb-control.md) + - [pd-recover](/pd-recover.md) + - [binlog-ctl](/tidb-binlog/binlog-control.md) + - 命令行参数 + - [tidb-server](/command-line-flags-for-tidb-configuration.md) + - [tikv-server](/command-line-flags-for-tikv-configuration.md) + - [tiflash-server](/tiflash/tiflash-command-line-flags.md) + - [pd-server](/command-line-flags-for-pd-configuration.md) + - 监控指标 + - [Overview 面板](/grafana-overview-dashboard.md) + - [Performance Overview 面板](/grafana-performance-overview-dashboard.md) + - [TiDB 面板](/grafana-tidb-dashboard.md) + - [PD 面板](/grafana-pd-dashboard.md) + - [TiKV 面板](/grafana-tikv-dashboard.md) + - [TiFlash 监控指标](/tiflash/monitor-tiflash.md) + - [TiCDC 监控指标](/ticdc/monitor-ticdc.md) + - [Resource Control 监控指标](/grafana-resource-control-dashboard.md) + - 安全加固 + - [为 TiDB 客户端服务端间通信开启加密传输](/enable-tls-between-clients-and-servers.md) + - [为 TiDB 组件间通信开启加密传输](/enable-tls-between-components.md) + - [生成自签名证书](/generate-self-signed-certificates.md) + - [静态加密](/encryption-at-rest.md) + - [为 TiDB 落盘文件开启加密](/enable-disk-spill-encrypt.md) + - [日志脱敏](/log-redaction.md) + - 权限 + - [与 MySQL 安全特性差异](/security-compatibility-with-mysql.md) + - [权限管理](/privilege-management.md) + - [TiDB 用户账户管理](/user-account-management.md) + - [TiDB 密码管理](/password-management.md) + - [基于角色的访问控制](/role-based-access-control.md) + - [TiDB 证书鉴权使用指南](/certificate-authentication.md) + - SQL + - SQL 语言结构和语法 + - 属性 + - [AUTO_INCREMENT](/auto-increment.md) + - [AUTO_RANDOM](/auto-random.md) + - [SHARD_ROW_ID_BITS](/shard-row-id-bits.md) + - [字面值](/literal-values.md) + - [Schema 对象名](/schema-object-names.md) + - [关键字](/keywords.md) + - 
[用户自定义变量](/user-defined-variables.md) + - [表达式语法](/expression-syntax.md) + - [注释语法](/comment-syntax.md) + - SQL 语句 + - [`ADD COLUMN`](/sql-statements/sql-statement-add-column.md) + - [`ADD INDEX`](/sql-statements/sql-statement-add-index.md) + - [`ADMIN`](/sql-statements/sql-statement-admin.md) + - [`ADMIN CANCEL DDL`](/sql-statements/sql-statement-admin-cancel-ddl.md) + - [`ADMIN CHECKSUM TABLE`](/sql-statements/sql-statement-admin-checksum-table.md) + - [`ADMIN CHECK [TABLE|INDEX]`](/sql-statements/sql-statement-admin-check-table-index.md) + - [`ADMIN CLEANUP`](/sql-statements/sql-statement-admin-cleanup.md) + - [`ADMIN PAUSE DDL`](/sql-statements/sql-statement-admin-pause-ddl.md) + - [`ADMIN RECOVER INDEX`](/sql-statements/sql-statement-admin-recover.md) + - [`ADMIN RESUME DDL`](/sql-statements/sql-statement-admin-resume-ddl.md) + - [`ADMIN SHOW DDL [JOBS|QUERIES]`](/sql-statements/sql-statement-admin-show-ddl.md) + - [`ADMIN SHOW TELEMETRY`](/sql-statements/sql-statement-admin-show-telemetry.md) + - [`ALTER DATABASE`](/sql-statements/sql-statement-alter-database.md) + - [`ALTER INDEX`](/sql-statements/sql-statement-alter-index.md) + - [`ALTER INSTANCE`](/sql-statements/sql-statement-alter-instance.md) + - [`ALTER PLACEMENT POLICY`](/sql-statements/sql-statement-alter-placement-policy.md) + - [`ALTER RESOURCE GROUP`](/sql-statements/sql-statement-alter-resource-group.md) + - [`ALTER TABLE`](/sql-statements/sql-statement-alter-table.md) + - [`ALTER TABLE COMPACT`](/sql-statements/sql-statement-alter-table-compact.md) + - [`ALTER USER`](/sql-statements/sql-statement-alter-user.md) + - [`ANALYZE TABLE`](/sql-statements/sql-statement-analyze-table.md) + - [`BACKUP`](/sql-statements/sql-statement-backup.md) + - [`BATCH`](/sql-statements/sql-statement-batch.md) + - [`BEGIN`](/sql-statements/sql-statement-begin.md) + - [`CALIBRATE RESOURCE`](/sql-statements/sql-statement-calibrate-resource.md) + - [`CANCEL IMPORT JOB`](/sql-statements/sql-statement-cancel-import-job.md) 
+ - [`CHANGE COLUMN`](/sql-statements/sql-statement-change-column.md) + - [`CHANGE DRAINER`](/sql-statements/sql-statement-change-drainer.md) + - [`CHANGE PUMP`](/sql-statements/sql-statement-change-pump.md) + - [`COMMIT`](/sql-statements/sql-statement-commit.md) + - [`CREATE [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-create-binding.md) + - [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md) + - [`CREATE INDEX`](/sql-statements/sql-statement-create-index.md) + - [`CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-create-placement-policy.md) + - [`CREATE RESOURCE GROUP`](/sql-statements/sql-statement-create-resource-group.md) + - [`CREATE ROLE`](/sql-statements/sql-statement-create-role.md) + - [`CREATE SEQUENCE`](/sql-statements/sql-statement-create-sequence.md) + - [`CREATE TABLE LIKE`](/sql-statements/sql-statement-create-table-like.md) + - [`CREATE TABLE`](/sql-statements/sql-statement-create-table.md) + - [`CREATE USER`](/sql-statements/sql-statement-create-user.md) + - [`CREATE VIEW`](/sql-statements/sql-statement-create-view.md) + - [`DEALLOCATE`](/sql-statements/sql-statement-deallocate.md) + - [`DELETE`](/sql-statements/sql-statement-delete.md) + - [`DESC`](/sql-statements/sql-statement-desc.md) + - [`DESCRIBE`](/sql-statements/sql-statement-describe.md) + - [`DO`](/sql-statements/sql-statement-do.md) + - [`DROP [GLOBAL|SESSION] BINDING`](/sql-statements/sql-statement-drop-binding.md) + - [`DROP COLUMN`](/sql-statements/sql-statement-drop-column.md) + - [`DROP DATABASE`](/sql-statements/sql-statement-drop-database.md) + - [`DROP INDEX`](/sql-statements/sql-statement-drop-index.md) + - [`DROP PLACEMENT POLICY`](/sql-statements/sql-statement-drop-placement-policy.md) + - [`DROP RESOURCE GROUP`](/sql-statements/sql-statement-drop-resource-group.md) + - [`DROP ROLE`](/sql-statements/sql-statement-drop-role.md) + - [`DROP SEQUENCE`](/sql-statements/sql-statement-drop-sequence.md) + - [`DROP 
STATS`](/sql-statements/sql-statement-drop-stats.md) + - [`DROP TABLE`](/sql-statements/sql-statement-drop-table.md) + - [`DROP USER`](/sql-statements/sql-statement-drop-user.md) + - [`DROP VIEW`](/sql-statements/sql-statement-drop-view.md) + - [`EXECUTE`](/sql-statements/sql-statement-execute.md) + - [`EXPLAIN ANALYZE`](/sql-statements/sql-statement-explain-analyze.md) + - [`EXPLAIN`](/sql-statements/sql-statement-explain.md) + - [`FLASHBACK CLUSTER TO TIMESTAMP`](/sql-statements/sql-statement-flashback-to-timestamp.md) + - [`FLASHBACK DATABASE`](/sql-statements/sql-statement-flashback-database.md) + - [`FLASHBACK TABLE`](/sql-statements/sql-statement-flashback-table.md) + - [`FLUSH PRIVILEGES`](/sql-statements/sql-statement-flush-privileges.md) + - [`FLUSH STATUS`](/sql-statements/sql-statement-flush-status.md) + - [`FLUSH TABLES`](/sql-statements/sql-statement-flush-tables.md) + - [`GRANT `](/sql-statements/sql-statement-grant-privileges.md) + - [`GRANT `](/sql-statements/sql-statement-grant-role.md) + - [`IMPORT INTO`](/sql-statements/sql-statement-import-into.md) + - [`INSERT`](/sql-statements/sql-statement-insert.md) + - [`KILL [TIDB]`](/sql-statements/sql-statement-kill.md) + - [`LOAD DATA`](/sql-statements/sql-statement-load-data.md) + - [`LOAD STATS`](/sql-statements/sql-statement-load-stats.md) + - [`LOCK STATS`](/sql-statements/sql-statement-lock-stats.md) + - [`LOCK TABLES` 和 `UNLOCK TABLES`](/sql-statements/sql-statement-lock-tables-and-unlock-tables.md) + - [`MODIFY COLUMN`](/sql-statements/sql-statement-modify-column.md) + - [`PREPARE`](/sql-statements/sql-statement-prepare.md) + - [`RECOVER TABLE`](/sql-statements/sql-statement-recover-table.md) + - [`RENAME INDEX`](/sql-statements/sql-statement-rename-index.md) + - [`RENAME TABLE`](/sql-statements/sql-statement-rename-table.md) + - [`RENAME USER`](/sql-statements/sql-statement-rename-user.md) + - [`REPLACE`](/sql-statements/sql-statement-replace.md) + - 
[`RESTORE`](/sql-statements/sql-statement-restore.md) + - [`REVOKE <privileges>`](/sql-statements/sql-statement-revoke-privileges.md) + - [`REVOKE <role>`](/sql-statements/sql-statement-revoke-role.md) + - [`ROLLBACK`](/sql-statements/sql-statement-rollback.md) + - [`SAVEPOINT`](/sql-statements/sql-statement-savepoint.md) + - [`SELECT`](/sql-statements/sql-statement-select.md) + - [`SET DEFAULT ROLE`](/sql-statements/sql-statement-set-default-role.md) + - [`SET [NAMES|CHARACTER SET]`](/sql-statements/sql-statement-set-names.md) + - [`SET PASSWORD`](/sql-statements/sql-statement-set-password.md) + - [`SET RESOURCE GROUP`](/sql-statements/sql-statement-set-resource-group.md) + - [`SET ROLE`](/sql-statements/sql-statement-set-role.md) + - [`SET TRANSACTION`](/sql-statements/sql-statement-set-transaction.md) + - [`SET [GLOBAL|SESSION] <variable>`](/sql-statements/sql-statement-set-variable.md) + - [`SHOW [BACKUPS|RESTORES]`](/sql-statements/sql-statement-show-backups.md) + - [`SHOW ANALYZE STATUS`](/sql-statements/sql-statement-show-analyze-status.md) + - [`SHOW [GLOBAL|SESSION] BINDINGS`](/sql-statements/sql-statement-show-bindings.md) + - [`SHOW BUILTINS`](/sql-statements/sql-statement-show-builtins.md) + - [`SHOW CHARACTER SET`](/sql-statements/sql-statement-show-character-set.md) + - [`SHOW COLLATION`](/sql-statements/sql-statement-show-collation.md) + - [`SHOW [FULL] COLUMNS FROM`](/sql-statements/sql-statement-show-columns-from.md) + - [`SHOW CONFIG`](/sql-statements/sql-statement-show-config.md) + - [`SHOW CREATE PLACEMENT POLICY`](/sql-statements/sql-statement-show-create-placement-policy.md) + - [`SHOW CREATE RESOURCE GROUP`](/sql-statements/sql-statement-show-create-resource-group.md) + - [`SHOW CREATE SEQUENCE`](/sql-statements/sql-statement-show-create-sequence.md) + - [`SHOW CREATE TABLE`](/sql-statements/sql-statement-show-create-table.md) + - [`SHOW CREATE DATABASE`](/sql-statements/sql-statement-show-create-database.md) + - [`SHOW CREATE 
USER`](/sql-statements/sql-statement-show-create-user.md) + - [`SHOW DATABASES`](/sql-statements/sql-statement-show-databases.md) + - [`SHOW DRAINER STATUS`](/sql-statements/sql-statement-show-drainer-status.md) + - [`SHOW ENGINES`](/sql-statements/sql-statement-show-engines.md) + - [`SHOW ERRORS`](/sql-statements/sql-statement-show-errors.md) + - [`SHOW [FULL] FIELDS FROM`](/sql-statements/sql-statement-show-fields-from.md) + - [`SHOW GRANTS`](/sql-statements/sql-statement-show-grants.md) + - [`SHOW IMPORT JOB`](/sql-statements/sql-statement-show-import-job.md) + - [`SHOW INDEX [FROM|IN]`](/sql-statements/sql-statement-show-index.md) + - [`SHOW INDEXES [FROM|IN]`](/sql-statements/sql-statement-show-indexes.md) + - [`SHOW KEYS [FROM|IN]`](/sql-statements/sql-statement-show-keys.md) + - [`SHOW MASTER STATUS`](/sql-statements/sql-statement-show-master-status.md) + - [`SHOW PLACEMENT`](/sql-statements/sql-statement-show-placement.md) + - [`SHOW PLACEMENT FOR`](/sql-statements/sql-statement-show-placement-for.md) + - [`SHOW PLACEMENT LABELS`](/sql-statements/sql-statement-show-placement-labels.md) + - [`SHOW PLUGINS`](/sql-statements/sql-statement-show-plugins.md) + - [`SHOW PRIVILEGES`](/sql-statements/sql-statement-show-privileges.md) + - [`SHOW [FULL] PROCESSLIST`](/sql-statements/sql-statement-show-processlist.md) + - [`SHOW PROFILES`](/sql-statements/sql-statement-show-profiles.md) + - [`SHOW PUMP STATUS`](/sql-statements/sql-statement-show-pump-status.md) + - [`SHOW SCHEMAS`](/sql-statements/sql-statement-show-schemas.md) + - [`SHOW STATS_HEALTHY`](/sql-statements/sql-statement-show-stats-healthy.md) + - [`SHOW STATS_HISTOGRAMS`](/sql-statements/sql-statement-show-histograms.md) + - [`SHOW STATS_LOCKED`](/sql-statements/sql-statement-show-stats-locked.md) + - [`SHOW STATS_META`](/sql-statements/sql-statement-show-stats-meta.md) + - [`SHOW STATUS`](/sql-statements/sql-statement-show-status.md) + - [`SHOW TABLE 
NEXT_ROW_ID`](/sql-statements/sql-statement-show-table-next-rowid.md) + - [`SHOW TABLE REGIONS`](/sql-statements/sql-statement-show-table-regions.md) + - [`SHOW TABLE STATUS`](/sql-statements/sql-statement-show-table-status.md) + - [`SHOW [FULL] TABLES`](/sql-statements/sql-statement-show-tables.md) + - [`SHOW [GLOBAL|SESSION] VARIABLES`](/sql-statements/sql-statement-show-variables.md) + - [`SHOW WARNINGS`](/sql-statements/sql-statement-show-warnings.md) + - [`SHUTDOWN`](/sql-statements/sql-statement-shutdown.md) + - [`SPLIT REGION`](/sql-statements/sql-statement-split-region.md) + - [`START TRANSACTION`](/sql-statements/sql-statement-start-transaction.md) + - [`TABLE`](/sql-statements/sql-statement-table.md) + - [`TRACE`](/sql-statements/sql-statement-trace.md) + - [`TRUNCATE`](/sql-statements/sql-statement-truncate.md) + - [`UNLOCK STATS`](/sql-statements/sql-statement-unlock-stats.md) + - [`UPDATE`](/sql-statements/sql-statement-update.md) + - [`USE`](/sql-statements/sql-statement-use.md) + - [`WITH`](/sql-statements/sql-statement-with.md) + - 数据类型 + - [数据类型概述](/data-type-overview.md) + - [数据类型默认值](/data-type-default-values.md) + - [数值类型](/data-type-numeric.md) + - [日期和时间类型](/data-type-date-and-time.md) + - [字符串类型](/data-type-string.md) + - [JSON 类型](/data-type-json.md) + - 函数与操作符 + - [函数与操作符概述](/functions-and-operators/functions-and-operators-overview.md) + - [表达式求值的类型转换](/functions-and-operators/type-conversion-in-expression-evaluation.md) + - [操作符](/functions-and-operators/operators.md) + - [控制流程函数](/functions-and-operators/control-flow-functions.md) + - [字符串函数](/functions-and-operators/string-functions.md) + - [数值函数与操作符](/functions-and-operators/numeric-functions-and-operators.md) + - [日期和时间函数](/functions-and-operators/date-and-time-functions.md) + - [位函数和操作符](/functions-and-operators/bit-functions-and-operators.md) + - [Cast 函数和操作符](/functions-and-operators/cast-functions-and-operators.md) + - 
[加密和压缩函数](/functions-and-operators/encryption-and-compression-functions.md) + - [锁函数](/functions-and-operators/locking-functions.md) + - [信息函数](/functions-and-operators/information-functions.md) + - [JSON 函数](/functions-and-operators/json-functions.md) + - [GROUP BY 聚合函数](/functions-and-operators/aggregate-group-by-functions.md) + - [窗口函数](/functions-and-operators/window-functions.md) + - [其它函数](/functions-and-operators/miscellaneous-functions.md) + - [精度数学](/functions-and-operators/precision-math.md) + - [集合运算](/functions-and-operators/set-operators.md) + - [下推到 TiKV 的表达式列表](/functions-and-operators/expressions-pushed-down.md) + - [TiDB 特有的函数](/functions-and-operators/tidb-functions.md) + - [Oracle 与 TiDB 函数和语法差异对照](/oracle-functions-to-tidb.md) + - [聚簇索引](/clustered-indexes.md) + - [约束](/constraints.md) + - [生成列](/generated-columns.md) + - [SQL 模式](/sql-mode.md) + - [表属性](/table-attributes.md) + - 事务 + - [事务概览](/transaction-overview.md) + - [隔离级别](/transaction-isolation-levels.md) + - [乐观事务](/optimistic-transaction.md) + - [悲观事务](/pessimistic-transaction.md) + - [非事务 DML 语句](/non-transactional-dml.md) + - [视图](/views.md) + - [分区表](/partitioned-table.md) + - [临时表](/temporary-tables.md) + - [缓存表](/cached-tables.md) + - [外键约束](/foreign-key.md) + - 字符集和排序 + - [概述](/character-set-and-collation.md) + - [GBK](/character-set-gbk.md) + - [Placement Rules in SQL](/placement-rules-in-sql.md) + - 系统表 + - [`mysql`](/mysql-schema.md) + - INFORMATION_SCHEMA + - [Overview](/information-schema/information-schema.md) + - [`ANALYZE_STATUS`](/information-schema/information-schema-analyze-status.md) + - [`CLIENT_ERRORS_SUMMARY_BY_HOST`](/information-schema/client-errors-summary-by-host.md) + - [`CLIENT_ERRORS_SUMMARY_BY_USER`](/information-schema/client-errors-summary-by-user.md) + - [`CLIENT_ERRORS_SUMMARY_GLOBAL`](/information-schema/client-errors-summary-global.md) + - [`CHARACTER_SETS`](/information-schema/information-schema-character-sets.md) + - 
[`CLUSTER_CONFIG`](/information-schema/information-schema-cluster-config.md) + - [`CLUSTER_HARDWARE`](/information-schema/information-schema-cluster-hardware.md) + - [`CLUSTER_INFO`](/information-schema/information-schema-cluster-info.md) + - [`CLUSTER_LOAD`](/information-schema/information-schema-cluster-load.md) + - [`CLUSTER_LOG`](/information-schema/information-schema-cluster-log.md) + - [`CLUSTER_SYSTEMINFO`](/information-schema/information-schema-cluster-systeminfo.md) + - [`COLLATIONS`](/information-schema/information-schema-collations.md) + - [`COLLATION_CHARACTER_SET_APPLICABILITY`](/information-schema/information-schema-collation-character-set-applicability.md) + - [`COLUMNS`](/information-schema/information-schema-columns.md) + - [`DATA_LOCK_WAITS`](/information-schema/information-schema-data-lock-waits.md) + - [`DDL_JOBS`](/information-schema/information-schema-ddl-jobs.md) + - [`DEADLOCKS`](/information-schema/information-schema-deadlocks.md) + - [`ENGINES`](/information-schema/information-schema-engines.md) + - [`INSPECTION_RESULT`](/information-schema/information-schema-inspection-result.md) + - [`INSPECTION_RULES`](/information-schema/information-schema-inspection-rules.md) + - [`INSPECTION_SUMMARY`](/information-schema/information-schema-inspection-summary.md) + - [`KEY_COLUMN_USAGE`](/information-schema/information-schema-key-column-usage.md) + - [`MEMORY_USAGE`](/information-schema/information-schema-memory-usage.md) + - [`MEMORY_USAGE_OPS_HISTORY`](/information-schema/information-schema-memory-usage-ops-history.md) + - [`METRICS_SUMMARY`](/information-schema/information-schema-metrics-summary.md) + - [`METRICS_TABLES`](/information-schema/information-schema-metrics-tables.md) + - [`PARTITIONS`](/information-schema/information-schema-partitions.md) + - [`PLACEMENT_POLICIES`](/information-schema/information-schema-placement-policies.md) + - [`PROCESSLIST`](/information-schema/information-schema-processlist.md) + - 
[`REFERENTIAL_CONSTRAINTS`](/information-schema/information-schema-referential-constraints.md) + - [`RESOURCE_GROUPS`](/information-schema/information-schema-resource-groups.md) + - [`SCHEMATA`](/information-schema/information-schema-schemata.md) + - [`SEQUENCES`](/information-schema/information-schema-sequences.md) + - [`SESSION_VARIABLES`](/information-schema/information-schema-session-variables.md) + - [`SLOW_QUERY`](/information-schema/information-schema-slow-query.md) + - [`STATISTICS`](/information-schema/information-schema-statistics.md) + - [`TABLES`](/information-schema/information-schema-tables.md) + - [`TABLE_CONSTRAINTS`](/information-schema/information-schema-table-constraints.md) + - [`TABLE_STORAGE_STATS`](/information-schema/information-schema-table-storage-stats.md) + - [`TIDB_HOT_REGIONS`](/information-schema/information-schema-tidb-hot-regions.md) + - [`TIDB_HOT_REGIONS_HISTORY`](/information-schema/information-schema-tidb-hot-regions-history.md) + - [`TIDB_INDEXES`](/information-schema/information-schema-tidb-indexes.md) + - [`TIDB_SERVERS_INFO`](/information-schema/information-schema-tidb-servers-info.md) + - [`TIDB_TRX`](/information-schema/information-schema-tidb-trx.md) + - [`TIFLASH_REPLICA`](/information-schema/information-schema-tiflash-replica.md) + - [`TIFLASH_SEGMENTS`](/information-schema/information-schema-tiflash-segments.md) + - [`TIFLASH_TABLES`](/information-schema/information-schema-tiflash-tables.md) + - [`TIKV_REGION_PEERS`](/information-schema/information-schema-tikv-region-peers.md) + - [`TIKV_REGION_STATUS`](/information-schema/information-schema-tikv-region-status.md) + - [`TIKV_STORE_STATUS`](/information-schema/information-schema-tikv-store-status.md) + - [`USER_ATTRIBUTES`](/information-schema/information-schema-user-attributes.md) + - [`USER_PRIVILEGES`](/information-schema/information-schema-user-privileges.md) + - [`VARIABLES_INFO`](/information-schema/information-schema-variables-info.md) + - 
[`VIEWS`](/information-schema/information-schema-views.md) + - [`METRICS_SCHEMA`](/metrics-schema.md) + - [元数据锁](/metadata-lock.md) + - UI + - TiDB Dashboard + - [简介](/dashboard/dashboard-intro.md) + - 运维 + - [部署](/dashboard/dashboard-ops-deploy.md) + - [反向代理](/dashboard/dashboard-ops-reverse-proxy.md) + - [用户管理](/dashboard/dashboard-user.md) + - [安全](/dashboard/dashboard-ops-security.md) + - [访问](/dashboard/dashboard-access.md) + - [概况页面](/dashboard/dashboard-overview.md) + - [集群信息页面](/dashboard/dashboard-cluster-info.md) + - [Top SQL 页面](/dashboard/top-sql.md) + - [流量可视化页面](/dashboard/dashboard-key-visualizer.md) + - [监控关系图](/dashboard/dashboard-metrics-relation.md) + - SQL 语句分析 + - [列表页面](/dashboard/dashboard-statement-list.md) + - [执行详情页面](/dashboard/dashboard-statement-details.md) + - [慢查询页面](/dashboard/dashboard-slow-query.md) + - 集群诊断页面 + - [访问](/dashboard/dashboard-diagnostics-access.md) + - [查看报告](/dashboard/dashboard-diagnostics-report.md) + - [使用示例](/dashboard/dashboard-diagnostics-usage.md) + - [监控指标页面](/dashboard/dashboard-monitoring.md) + - [日志搜索页面](/dashboard/dashboard-log-search.md) + - [资源管控页面](/dashboard/dashboard-resource-manager.md) + - 实例性能分析 + - [手动分析页面](/dashboard/dashboard-profiling.md) + - [持续分析页面](/dashboard/continuous-profiling.md) + - 会话管理与配置 + - [分享会话](/dashboard/dashboard-session-share.md) + - [配置 SSO 登录](/dashboard/dashboard-session-sso.md) + - [常见问题](/dashboard/dashboard-faq.md) + - [遥测](/telemetry.md) + - [错误码](/error-codes.md) + - [通过拓扑 label 进行副本调度](/schedule-replicas-by-topology-labels.md) + - 内部组件介绍 + - [TiDB 后端任务分布式并行执行框架](/tidb-distributed-execution-framework.md) +- 常见问题解答 (FAQ) + - [FAQ 汇总](/faq/faq-overview.md) + - [产品 FAQ](/faq/tidb-faq.md) + - [SQL FAQ](/faq/sql-faq.md) + - [安装部署 FAQ](/faq/deploy-and-maintain-faq.md) + - [迁移 FAQ](/faq/migration-tidb-faq.md) + - [升级 FAQ](/faq/upgrade-faq.md) + - [监控 FAQ](/faq/monitor-faq.md) + - [集群管理 FAQ](/faq/manage-cluster-faq.md) + - [高可用 FAQ](/faq/high-availability-faq.md) + - [高可靠 
FAQ](/faq/high-reliability-faq.md) + - [备份恢复 FAQ](/faq/backup-and-restore-faq.md) +- 版本发布历史 + - [发布版本汇总](/releases/release-notes.md) + - [版本发布时间线](/releases/release-timeline.md) + - [TiDB 版本规则](/releases/versioning.md) + - [TiDB 离线包](/binary-package.md) + - v7.2 + - [7.2.0-DMR](/releases/release-7.2.0.md) + - v7.1 + - [7.1.0](/releases/release-7.1.0.md) + - v7.0 + - [7.0.0-DMR](/releases/release-7.0.0.md) + - v6.6 + - [6.6.0-DMR](/releases/release-6.6.0.md) + - v6.5 + - [6.5.3](/releases/release-6.5.3.md) + - [6.5.2](/releases/release-6.5.2.md) + - [6.5.1](/releases/release-6.5.1.md) + - [6.5.0](/releases/release-6.5.0.md) + - v6.4 + - [6.4.0-DMR](/releases/release-6.4.0.md) + - v6.3 + - [6.3.0-DMR](/releases/release-6.3.0.md) + - v6.2 + - [6.2.0-DMR](/releases/release-6.2.0.md) + - v6.1 + - [6.1.7](/releases/release-6.1.7.md) + - [6.1.6](/releases/release-6.1.6.md) + - [6.1.5](/releases/release-6.1.5.md) + - [6.1.4](/releases/release-6.1.4.md) + - [6.1.3](/releases/release-6.1.3.md) + - [6.1.2](/releases/release-6.1.2.md) + - [6.1.1](/releases/release-6.1.1.md) + - [6.1.0](/releases/release-6.1.0.md) + - v6.0 + - [6.0.0-DMR](/releases/release-6.0.0-dmr.md) + - v5.4 + - [5.4.3](/releases/release-5.4.3.md) + - [5.4.2](/releases/release-5.4.2.md) + - [5.4.1](/releases/release-5.4.1.md) + - [5.4.0](/releases/release-5.4.0.md) + - v5.3 + - [5.3.4](/releases/release-5.3.4.md) + - [5.3.3](/releases/release-5.3.3.md) + - [5.3.2](/releases/release-5.3.2.md) + - [5.3.1](/releases/release-5.3.1.md) + - [5.3.0](/releases/release-5.3.0.md) + - v5.2 + - [5.2.4](/releases/release-5.2.4.md) + - [5.2.3](/releases/release-5.2.3.md) + - [5.2.2](/releases/release-5.2.2.md) + - [5.2.1](/releases/release-5.2.1.md) + - [5.2.0](/releases/release-5.2.0.md) + - v5.1 + - [5.1.5](/releases/release-5.1.5.md) + - [5.1.4](/releases/release-5.1.4.md) + - [5.1.3](/releases/release-5.1.3.md) + - [5.1.2](/releases/release-5.1.2.md) + - [5.1.1](/releases/release-5.1.1.md) + - 
[5.1.0](/releases/release-5.1.0.md) + - v5.0 + - [5.0.6](/releases/release-5.0.6.md) + - [5.0.5](/releases/release-5.0.5.md) + - [5.0.4](/releases/release-5.0.4.md) + - [5.0.3](/releases/release-5.0.3.md) + - [5.0.2](/releases/release-5.0.2.md) + - [5.0.1](/releases/release-5.0.1.md) + - [5.0 GA](/releases/release-5.0.0.md) + - [5.0.0-rc](/releases/release-5.0.0-rc.md) + - v4.0 + - [4.0.16](/releases/release-4.0.16.md) + - [4.0.15](/releases/release-4.0.15.md) + - [4.0.14](/releases/release-4.0.14.md) + - [4.0.13](/releases/release-4.0.13.md) + - [4.0.12](/releases/release-4.0.12.md) + - [4.0.11](/releases/release-4.0.11.md) + - [4.0.10](/releases/release-4.0.10.md) + - [4.0.9](/releases/release-4.0.9.md) + - [4.0.8](/releases/release-4.0.8.md) + - [4.0.7](/releases/release-4.0.7.md) + - [4.0.6](/releases/release-4.0.6.md) + - [4.0.5](/releases/release-4.0.5.md) + - [4.0.4](/releases/release-4.0.4.md) + - [4.0.3](/releases/release-4.0.3.md) + - [4.0.2](/releases/release-4.0.2.md) + - [4.0.1](/releases/release-4.0.1.md) + - [4.0 GA](/releases/release-4.0-ga.md) + - [4.0.0-rc.2](/releases/release-4.0.0-rc.2.md) + - [4.0.0-rc.1](/releases/release-4.0.0-rc.1.md) + - [4.0.0-rc](/releases/release-4.0.0-rc.md) + - [4.0.0-beta.2](/releases/release-4.0.0-beta.2.md) + - [4.0.0-beta.1](/releases/release-4.0.0-beta.1.md) + - [4.0.0-beta](/releases/release-4.0.0-beta.md) + - v3.1 + - [3.1.2](/releases/release-3.1.2.md) + - [3.1.1](/releases/release-3.1.1.md) + - [3.1.0 GA](/releases/release-3.1.0-ga.md) + - [3.1.0-rc](/releases/release-3.1.0-rc.md) + - [3.1.0-beta.2](/releases/release-3.1.0-beta.2.md) + - [3.1.0-beta.1](/releases/release-3.1.0-beta.1.md) + - [3.1.0-beta](/releases/release-3.1.0-beta.md) + - v3.0 + - [3.0.20](/releases/release-3.0.20.md) + - [3.0.19](/releases/release-3.0.19.md) + - [3.0.18](/releases/release-3.0.18.md) + - [3.0.17](/releases/release-3.0.17.md) + - [3.0.16](/releases/release-3.0.16.md) + - [3.0.15](/releases/release-3.0.15.md) + - 
[3.0.14](/releases/release-3.0.14.md) + - [3.0.13](/releases/release-3.0.13.md) + - [3.0.12](/releases/release-3.0.12.md) + - [3.0.11](/releases/release-3.0.11.md) + - [3.0.10](/releases/release-3.0.10.md) + - [3.0.9](/releases/release-3.0.9.md) + - [3.0.8](/releases/release-3.0.8.md) + - [3.0.7](/releases/release-3.0.7.md) + - [3.0.6](/releases/release-3.0.6.md) + - [3.0.5](/releases/release-3.0.5.md) + - [3.0.4](/releases/release-3.0.4.md) + - [3.0.3](/releases/release-3.0.3.md) + - [3.0.2](/releases/release-3.0.2.md) + - [3.0.1](/releases/release-3.0.1.md) + - [3.0 GA](/releases/release-3.0-ga.md) + - [3.0.0-rc.3](/releases/release-3.0.0-rc.3.md) + - [3.0.0-rc.2](/releases/release-3.0.0-rc.2.md) + - [3.0.0-rc.1](/releases/release-3.0.0-rc.1.md) + - [3.0.0-beta.1](/releases/release-3.0.0-beta.1.md) + - [3.0.0-beta](/releases/release-3.0-beta.md) + - v2.1 + - [2.1.19](/releases/release-2.1.19.md) + - [2.1.18](/releases/release-2.1.18.md) + - [2.1.17](/releases/release-2.1.17.md) + - [2.1.16](/releases/release-2.1.16.md) + - [2.1.15](/releases/release-2.1.15.md) + - [2.1.14](/releases/release-2.1.14.md) + - [2.1.13](/releases/release-2.1.13.md) + - [2.1.12](/releases/release-2.1.12.md) + - [2.1.11](/releases/release-2.1.11.md) + - [2.1.10](/releases/release-2.1.10.md) + - [2.1.9](/releases/release-2.1.9.md) + - [2.1.8](/releases/release-2.1.8.md) + - [2.1.7](/releases/release-2.1.7.md) + - [2.1.6](/releases/release-2.1.6.md) + - [2.1.5](/releases/release-2.1.5.md) + - [2.1.4](/releases/release-2.1.4.md) + - [2.1.3](/releases/release-2.1.3.md) + - [2.1.2](/releases/release-2.1.2.md) + - [2.1.1](/releases/release-2.1.1.md) + - [2.1 GA](/releases/release-2.1-ga.md) + - [2.1 RC5](/releases/release-2.1-rc.5.md) + - [2.1 RC4](/releases/release-2.1-rc.4.md) + - [2.1 RC3](/releases/release-2.1-rc.3.md) + - [2.1 RC2](/releases/release-2.1-rc.2.md) + - [2.1 RC1](/releases/release-2.1-rc.1.md) + - [2.1 Beta](/releases/release-2.1-beta.md) + - v2.0 + - 
[2.0.11](/releases/release-2.0.11.md) + - [2.0.10](/releases/release-2.0.10.md) + - [2.0.9](/releases/release-2.0.9.md) + - [2.0.8](/releases/release-2.0.8.md) + - [2.0.7](/releases/release-2.0.7.md) + - [2.0.6](/releases/release-2.0.6.md) + - [2.0.5](/releases/release-2.0.5.md) + - [2.0.4](/releases/release-2.0.4.md) + - [2.0.3](/releases/release-2.0.3.md) + - [2.0.2](/releases/release-2.0.2.md) + - [2.0.1](/releases/release-2.0.1.md) + - [2.0](/releases/release-2.0-ga.md) + - [2.0 RC5](/releases/release-2.0-rc.5.md) + - [2.0 RC4](/releases/release-2.0-rc.4.md) + - [2.0 RC3](/releases/release-2.0-rc.3.md) + - [2.0 RC1](/releases/release-2.0-rc.1.md) + - [1.1 Beta](/releases/release-1.1-beta.md) + - [1.1 Alpha](/releases/release-1.1-alpha.md) + - v1.0 + - [1.0](/releases/release-1.0-ga.md) + - [Pre-GA](/releases/release-pre-ga.md) + - [RC4](/releases/release-rc.4.md) + - [RC3](/releases/release-rc.3.md) + - [RC2](/releases/release-rc.2.md) + - [RC1](/releases/release-rc.1.md) +- [术语表](/glossary.md) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md new file mode 100644 index 00000000..369cec61 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-choose-driver-or-orm.md @@ -0,0 +1,303 @@ +--- +title: 选择驱动或 ORM 框架 +summary: 选择驱动或 ORM 框架连接 TiDB。 +aliases: ['/zh/tidb/dev/choose-driver-or-orm'] +--- + +# 选择驱动或 ORM 框架 + +> **注意:** +> +> TiDB 支持等级说明: +> +> - **Full**:表明 TiDB 已经兼容该工具的绝大多数功能,并且在该工具的新版本中对其保持兼容。PingCAP 将定期地对 [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md)中的新版本进行兼容性测试。 +> - **Compatible**:表明由于该工具已适配 MySQL,而 TiDB 高度兼容 MySQL 协议,因此 TiDB 可以兼容该工具的大部分功能。但 PingCAP 并未对该工具作出完整的兼容性验证,有可能出现一些意外的行为。 +> +> 关于更多 TiDB 支持的第三方工具,你可以查看 [TiDB 支持的第三方工具](/develop/dev-guide-third-party-support.md)。 + +TiDB 兼容 MySQL 的协议,但存在部分与 MySQL 不兼容或有差异的特性,具体信息可查看[与 MySQL 
兼容性对比](/mysql-compatibility.md)。 + +## Java + +本节介绍 Java 语言的 Driver 及 ORM 的使用方式。 + +### Java Drivers + + +
+ +支持等级:**Full** + +按照 [MySQL 文档](https://dev.mysql.com/doc/connector-j/8.0/en/)中的说明下载并配置 Java JDBC 驱动程序即可使用。对于 TiDB v6.3.0 及以上版本,建议使用 MySQL Connector/J 8.0.33 及以上版本。 + +> **建议:** +> +> 在 8.0.32 之前的 MySQL Connector/J 8.0 版本中存在一个 [bug](https://bugs.mysql.com/bug.php?id=106252),当与 TiDB v6.3.0 之前的版本一起使用时,可能会导致线程卡死。为了避免此问题,建议使用 MySQL Connector/J 8.0.32 或更高版本,或者使用 TiDB JDBC(见 *TiDB-JDBC* 标签)。 + +有关一个完整的实例应用程序,可参阅 [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md)。 + +
+
+ +支持等级:**Full** + +[TiDB-JDBC](https://github.com/pingcap/mysql-connector-j) 是基于 MySQL 8.0.29 的定制版本。TiDB-JDBC 基于 MySQL 官方 8.0.29 版本编译,修复了原 JDBC 在 prepare 模式下多参数、多字段 EOF 的错误,并新增 TiCDC snapshot 自动维护和 SM3 认证插件等功能。 + +基于 SM3 的认证仅在 TiDB 版本的 MySQL Connector/J 中支持。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + +``` + +如果你需要使用 SM3 认证,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + org.bouncycastle + bcprov-jdk15on + 1.67 + + + org.bouncycastle + bcpkix-jdk15on + 1.67 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'org.bouncycastle', name: 'bcprov-jdk15on', version: '1.67' +implementation group: 'org.bouncycastle', name: 'bcpkix-jdk15on', version: '1.67' +``` + +
+
+ +### Java ORM 框架 + +> **注意:** +> +> - Hibernate 当前[不支持嵌套事务](https://stackoverflow.com/questions/37927208/nested-transaction-in-spring-app-with-jpa-postgres)。 +> - TiDB 从 v6.2.0 版本开始支持 [Savepoint](/sql-statements/sql-statement-savepoint.md)。如需在 `@Transactional` 中使用 `Propagation.NESTED` 事务传播选项,即 `@Transactional(propagation = Propagation.NESTED)`,请确认你的 TiDB 版本为 v6.2.0 或以上。 + + +
+ +支持等级:**Full** + +你可以使用 [Gradle](https://gradle.org/install) 或 [Maven](https://maven.apache.org/install.html) 获取你的应用程序的所有依赖项,且会帮你下载依赖项的间接依赖,而无需你手动管理复杂的依赖关系。注意,只有 Hibernate `6.0.0.Beta2` 及以上版本才支持 TiDB 方言。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + org.hibernate.orm + hibernate-core + 6.0.0.CR2 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +如果你使用的是 `Gradle`,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation 'org.hibernate:hibernate-core:6.0.0.CR2' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +- 有关原生 Java 使用 Hibernate 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md)。 +- 有关 Spring 使用 Spring Data JPA、Hibernate 进行 TiDB 应用程序构建的例子,可参阅[使用 Spring Boot 构建 TiDB 应用程序](/develop/dev-guide-sample-application-java-spring-boot.md)。 + +额外的,你需要在 [Hibernate 配置文件](https://www.tutorialspoint.com/hibernate/hibernate_configuration.htm)中指定 TiDB 方言 `org.hibernate.dialect.TiDBDialect`,此方言在 Hibernate `6.0.0.Beta2` 以上才可支持。若你无法升级 Hibernate 版本,那么请你直接使用 MySQL 5.7 的方言 `org.hibernate.dialect.MySQL57Dialect`。但这可能造成不可预料的使用结果,及部分 TiDB 特有特性的缺失,如:[序列](/sql-statements/sql-statement-create-sequence.md)等。 + +
+ +
+ +支持等级:**Full** + +你可以使用 [Gradle](https://gradle.org/install) 或 [Maven](https://maven.apache.org/install.html) 获取应用程序的所有依赖项包括间接依赖,无需手动管理复杂的依赖关系。 + +如果你使用的是 Maven,请将以下内容添加到你的 ``: + +```xml + + org.mybatis + mybatis + 3.5.9 + + + + mysql + mysql-connector-java + 5.1.49 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation 'org.mybatis:mybatis:3.5.9' +implementation 'mysql:mysql-connector-java:5.1.49' +``` + +使用 MyBatis 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 MyBatis 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-mybatis.md)。 + +
+ +
+ +### Java 客户端负载均衡 + +**tidb-loadbalance** + +支持等级:**Full** + +[tidb-loadbalance](https://github.com/pingcap/tidb-loadbalance) 是应用端的负载均衡组件。通过 tidb-loadbalance,你可以实现自动维护 TiDB server 的节点信息,根据节点信息使用 tidb-loadbalance 策略在客户端分发 JDBC 连接。客户端应用与 TiDB server 之间使用 JDBC 直连,性能高于使用负载均衡组件。 + +目前 tidb-loadbalance 已实现轮询、随机、权重等负载均衡策略。 + +> **注意:** +> +> tidb-loadbalance 需配合 mysql-connector-j 一起使用。 + +如果你使用的是 **Maven**,请将以下内容添加到你的 ``: + +```xml + + io.github.lastincisor + mysql-connector-java + 8.0.29-tidb-1.0.0 + + + io.github.lastincisor + tidb-loadbalance + 0.0.5 + +``` + +如果你使用的是 Gradle,请将以下内容添加到你的 `dependencies`: + +```gradle +implementation group: 'io.github.lastincisor', name: 'mysql-connector-java', version: '8.0.29-tidb-1.0.0' +implementation group: 'io.github.lastincisor', name: 'tidb-loadbalance', version: '0.0.5' +``` + +## Golang + +本节介绍 Golang 语言的 Driver 及 ORM 的使用方式。 + +### Golang Drivers + +**go-sql-driver/mysql** + +支持等级:**Full** + +按照 [go-sql-driver/mysql 文档](https://github.com/go-sql-driver/mysql)中的说明获取并配置 Golang 驱动程序即可使用。 + +有关一个完整的实例应用程序,可参阅使用 [TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-sql-driver.md)。 + +### Golang ORM 框架 + +**GORM** + +支持等级:**Full** + +GORM 是一个流行的 Golang 的 ORM 框架,你可以使用 `go get` 获取你的应用程序的所有依赖项。 + +```shell +go get -u gorm.io/gorm +go get -u gorm.io/driver/mysql +``` + +使用 GORM 进行 TiDB 应用程序构建的例子,可参阅 [TiDB 和 GORM 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-gorm.md)。 + +## Python + +本节介绍 Python 语言的 Driver 及 ORM 的使用方式。 + +### Python Drivers + + +
+ +支持等级:**Compatible** + +按照 [PyMySQL 文档](https://pypi.org/project/PyMySQL/)中的说明下载并配置驱动程序即可使用。建议使用 **1.0.2** 及以上版本。 + +使用 PyMySQL 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 PyMySQL 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-pymysql.md)。 + +
+
+ +支持等级:**Compatible** + +按照 [mysqlclient 文档](https://pypi.org/project/mysqlclient/)中的说明下载并配置驱动程序即可使用。建议使用 **2.1.1** 及以上版本。 + +使用 mysqlclient 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 mysqlclient 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysqlclient.md)。 + +
+
+ +支持等级:**Compatible** + +按照 [MySQL Connector/Python 文档](https://dev.mysql.com/doc/connector-python/en/connector-python-installation-binary.html)中的说明下载并配置驱动程序即可使用。建议使用 **8.0.31** 及以上版本。 + +使用 MySQL Connector/Python 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysql-connector.md)。 + +
+
+ +### Python ORM 框架 + + + +
+ +支持等级:**Full** + +[Django](https://docs.djangoproject.com/) 是一个流行的 Python 的开发框架,你可以使用 `pip install Django==3.2.16 django-tidb>=3.0.0` 获取你的应用程序的所有依赖项。建议使用 Django **3.2.16** 及以上版本。 + +使用 Django 构建 TiDB 应用程序的例子,可参阅[使用 Django 构建 TiDB 应用程序](/develop/dev-guide-sample-application-python-django.md)。 + +
+ +
+ +支持等级:**Full** + +[SQLAlchemy](https://www.sqlalchemy.org/) 是一个流行的 Python 的 ORM 框架,你可以使用 `pip install SQLAlchemy==1.4.44` 获取你的应用程序的所有依赖项。建议使用 **1.4.44** 及以上版本。 + +使用 SQLAlchemy 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 SQLAlchemy 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-sqlalchemy.md)。 + +
+ +
+ +支持等级:**Compatible** + +[peewee](http://docs.peewee-orm.com/en/latest/) 是一个流行的 Python 的 ORM 框架,你可以使用 `pip install peewee==3.15.4` 获取你的应用程序的所有依赖项。建议使用 **3.15.4** 及以上版本。 + +使用 peewee 构建 TiDB 应用程序的例子,可参阅 [TiDB 和 peewee 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-peewee.md)。 + +
+ +
diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md new file mode 100644 index 00000000..aef7b5a7 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-insert-data.md @@ -0,0 +1,293 @@ +--- +title: 插入数据 +summary: 插入数据、批量导入数据的方法、最佳实践及例子。 +aliases: ['/zh/tidb/dev/insert-data'] +--- + + + +# 插入数据 + +此页面将展示使用 SQL 语言,配合各种编程语言将数据插入到 TiDB 中。 + +## 在开始之前 + +在阅读本页面之前,你需要准备以下事项: + +- [使用 TiDB Serverless 构建 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md)。 +- 阅读[数据库模式概览](/develop/dev-guide-schema-design-overview.md),并[创建数据库](/develop/dev-guide-create-database.md)、[创建表](/develop/dev-guide-create-table.md)、[创建二级索引](/develop/dev-guide-create-secondary-indexes.md)。 + +## 插入行 + +假设你需要插入多行数据,那么会有两种插入的办法,假设需要插入 3 个玩家数据: + +- 一个**多行插入语句**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2), (3, 300, 5); + ``` + +- 多个**单行插入语句**: + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (2, 230, 2); + INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (3, 300, 5); + ``` + +一般来说使用一个`多行插入语句`,会比多个`单行插入语句`快。 + + +
+ +在 SQL 中插入多行数据的示例: + +```sql +CREATE TABLE `player` (`id` INT, `coins` INT, `goods` INT); +INSERT INTO `player` (`id`, `coins`, `goods`) VALUES (1, 1000, 1), (2, 230, 2); +``` + +有关如何使用此 SQL,可查阅[连接到 TiDB 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-2-步连接到集群)文档部分,按文档步骤使用客户端连接到 TiDB 集群后,输入 SQL 语句即可。 + +
+ +
+ +在 Java 中插入多行数据的示例: + +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + connection.setAutoCommit(false); + + PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) + + // first player + pstmt.setInt(1, 1); + pstmt.setInt(2, 1000); + pstmt.setInt(3, 1); + pstmt.addBatch(); + + // second player + pstmt.setInt(1, 2); + pstmt.setInt(2, 230); + pstmt.setInt(3, 2); + pstmt.addBatch(); + + pstmt.executeBatch(); + connection.commit(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +另外,由于 MySQL JDBC Driver 默认设置问题,你需更改部分参数,以获得更好的批量插入性能: + +| 参数 | 作用 | 推荐场景 | 推荐配置 | +| :------------------------: | :-----------------------------------: | :-----------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------: | +| `useServerPrepStmts` | 是否使用服务端开启预处理语句支持 | 在需要多次使用预处理语句时 | `true` | +| `cachePrepStmts` | 客户端是否缓存预处理语句 | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | 预处理语句最大大小(默认 256 字符) | 预处理语句大于 256 字符时 | 按实际预处理语句大小配置 | +| `prepStmtCacheSize` | 预处理语句最大缓存数量 (默认 25 条) | 预处理语句数量大于 25 条时 | 按实际预处理语句数量配置 | +| `rewriteBatchedStatements` | 是否重写 Batch 语句 | 需要批量操作时 | `true` | +| `allowMultiQueries` | 开启批量操作 | 因为一个[客户端 Bug](https://bugs.mysql.com/bug.php?id=96623) 在 `rewriteBatchedStatements = true` 和 `useServerPrepStmts = true` 时,需设置此项 | `true` | + +MySQL JDBC Driver 还提供了一个集成配置项:`useConfigs`。当它配置为 `maxPerformance` 时,相当于配置了一组配置,以 `mysql:mysql-connector-java:8.0.28` 为例,`useConfigs=maxPerformance` 包含: + +```properties +cachePrepStmts=true +cacheCallableStmts=true +cacheServerConfiguration=true +useLocalSessionState=true +elideSetAutoCommits=true +alwaysSendSetIsolation=false +enableQueryTimeouts=false +connectionAttributes=none +useInformationSchema=true +``` + +你可以自行查看 
`mysql-connector-java-{version}.jar!/com/mysql/cj/configurations/maxPerformance.properties` 来获得对应版本 MySQL JDBC Driver 的 `useConfigs=maxPerformance` 包含配置。 + +在此处给出一个较为通用场景的 JDBC 连接字符串配置,以 Host: `127.0.0.1`,Port: `4000`,用户名: `root`,密码: 空,默认数据库: `test`为例: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +有关 Java 的完整示例,可参阅: + +- [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md) +- [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md) +- [使用 Spring Boot 构建 TiDB 应用程序](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
+ +在 Golang 中插入多行数据的示例: + +```go +package main + +import ( + "database/sql" + "strings" + + _ "github.com/go-sql-driver/mysql" +) + +type Player struct { + ID string + Coins int + Goods int +} + +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := db.Begin() + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func playerToArgs(players []Player) []interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + strings.Repeat(",(?,?,?)", amount-1) +} +``` + +有关 Golang 的完整示例,可参阅: + +- [TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-sql-driver.md) +- [TiDB 和 GORM 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-golang-gorm.md) + +
+ +
+ +在 Python 中插入多行数据的示例: + +```python +import MySQLdb + +connection = MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="bookshop", + autocommit=True +) +with connection: + + with connection.cursor() as cur: + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + cur.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player_list[idx:idx + 114]) +``` + +有关 Python 的完整示例,可参阅: + +- [TiDB 和 PyMySQL 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-pymysql.md) +- [TiDB 和 mysqlclient 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysqlclient.md) +- [TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-mysql-connector.md) +- [TiDB 和 SQLAlchemy 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-sqlalchemy.md) +- [TiDB 和 peewee 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-python-peewee.md) + +
+ +
+ +## 批量插入 + +如果你需要快速地将大量数据导入 TiDB 集群,最好的方式并不是使用 `INSERT` 语句,这并不是最高效的方法,而且需要你自行处理异常等问题。推荐使用 PingCAP 提供的一系列工具进行数据迁移: + +- 数据导出工具:[Dumpling](/dumpling-overview.md)。可以导出 MySQL 或 TiDB 的数据到本地或 Amazon S3 中。 +- 数据导入工具:[TiDB Lightning](/tidb-lightning/tidb-lightning-overview.md)。可以导入 `Dumpling` 导出的数据、CSV 文件,或者 [Amazon Aurora 生成的 Apache Parquet 文件](/migrate-aurora-to-tidb.md)。同时支持在本地盘或 Amazon S3 云盘读取数据。 +- 数据同步工具:[TiDB Data Migration](/dm/dm-overview.md)。可同步 MySQL、MariaDB、Amazon Aurora 数据库到 TiDB 中。且支持分库分表数据库的迁移。 +- 数据备份恢复工具:[Backup & Restore (BR)](/br/backup-and-restore-overview.md)。相对于 `Dumpling`,BR 更适合**_大数据量_**的场景。 + +## 避免热点 + +在设计表时需要考虑是否存在大量插入行为,若有,需在表设计期间对热点进行规避。请查看[创建表 - 选择主键](/develop/dev-guide-create-table.md#选择主键)部分,并遵从[选择主键时应遵守的规则](/develop/dev-guide-create-table.md#选择主键时应遵守的规则)。 + +更多有关热点问题的处理办法,请参考 [TiDB 热点问题处理](/troubleshoot-hot-spot-issues.md)文档。 + +## 主键为 `AUTO_RANDOM` 表插入数据 + +在插入的表主键为 `AUTO_RANDOM` 时,这时默认情况下,不能指定主键。例如 [bookshop](/develop/dev-guide-bookshop-schema-design.md) 数据库中,可以看到 [users 表](/develop/dev-guide-bookshop-schema-design.md#users-表) 的 `id` 字段含有 `AUTO_RANDOM` 属性。 + +此时,不可使用类似以下 SQL 进行插入: + +```sql +INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); +``` + +将会产生错误: + +``` +ERROR 8216 (HY000): Invalid auto random: Explicit insertion on auto_random column is disabled. Try to set @@allow_auto_random_explicit_insert = true. 
+``` + +这是旨在提示你,不建议在插入时手动指定 `AUTO_RANDOM` 的列。这时,你有两种解决办法处理此错误: + +- (推荐) 插入语句中去除此列,使用 TiDB 帮你初始化的 `AUTO_RANDOM` 值。这样符合 `AUTO_RANDOM` 的语义。 + + {{< copyable "sql" >}} + + ```sql + INSERT INTO `bookshop`.`users` (`balance`, `nickname`) VALUES (0.00, 'nicky'); + ``` + +- 如果你确认一定需要指定此列,那么可以使用 [SET 语句](/sql-statements/sql-statement-set-variable.md)通过更改用户变量的方式,允许在插入时,指定 `AUTO_RANDOM` 的列。 + + {{< copyable "sql" >}} + + ```sql + SET @@allow_auto_random_explicit_insert = true; + INSERT INTO `bookshop`.`users` (`id`, `balance`, `nickname`) VALUES (1, 0.00, 'nicky'); + ``` + +## 使用 HTAP + +在 TiDB 中,使用 HTAP 能力无需你在插入数据时进行额外操作。不会有任何额外的插入逻辑,由 TiDB 自动进行数据的一致性保证。你只需要在创建表后,[开启列存副本同步](/develop/dev-guide-create-table.md#使用-htap-能力),就可以直接使用列存副本来加速你的查询。 diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md new file mode 100644 index 00000000..ae76dfec --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-overview.md @@ -0,0 +1,49 @@ +--- +title: 开发者手册概览 +summary: 整体叙述了开发者手册,罗列了开发者手册的大致脉络。 +aliases: ['/zh/tidb/dev/developer-guide-overview'] +--- + +# 开发者手册概览 + +本文是为应用程序开发者所编写的,如果你对 TiDB 的内部原理感兴趣,或希望参与到 TiDB 的开发中来,那么可前往阅读 [TiDB Kernel Development Guide](https://pingcap.github.io/tidb-dev-guide/) 来获取更多 TiDB 的相关信息。 + +本手册将展示如何使用 TiDB 来快速构建一个应用,并且阐述使用 TiDB 期间可能出现的场景以及可能会遇到的问题。因此,在阅读此页面之前,建议你先行阅读 [TiDB 数据库快速上手指南](/quick-start-with-tidb.md)。 + +此外,你还可以通过视频的形式学习免费的 [TiDB SQL 开发在线课程](https://pingcap.com/zh/courses-catalog/back-end-developer/?utm_source=docs-cn-dev-guide)。 + +## TiDB 基础 + +在你开始使用 TiDB 之前,你需要了解一些关于 TiDB 数据库的一些重要工作机制: + +- 阅读 [TiDB 事务概览](/transaction-overview.md)来了解 TiDB 的事务运作方式或查看[为应用开发程序员准备的事务说明](/develop/dev-guide-transaction-overview.md)查看应用开发程序员需要了解的事务部分。 +- 学习免费在线课程 [TiDB 架构与特点](https://learn.pingcap.com/learner/course/600003/?utm_source=docs-cn-dev-guide),了解构建 TiDB 分布式数据库集群的核心组件及其概念。 +- 了解[应用程序与 TiDB 
交互的方式](#应用程序与-tidb-交互的方式)。 + +## TiDB 事务机制 + +TiDB 支持分布式事务,而且提供[乐观事务](/optimistic-transaction.md)与[悲观事务](/pessimistic-transaction.md)两种事务模式。TiDB 当前版本中默认采用 **悲观事务** 模式,这让你在 TiDB 事务时可以像使用传统的单体数据库 (如: MySQL) 事务一样。 + +你可以使用 [BEGIN](/sql-statements/sql-statement-begin.md) 开启一个事务,或者使用 `BEGIN PESSIMISTIC` 显式的指定开启一个**悲观事务**,使用 `BEGIN OPTIMISTIC` 显式的指定开启一个**乐观事务**。随后,使用 [COMMIT](/sql-statements/sql-statement-commit.md) 提交事务,或使用 [ROLLBACK](/sql-statements/sql-statement-rollback.md) 回滚事务。 + +TiDB 会为你保证 `BEGIN` 开始到 `COMMIT` 或 `ROLLBACK` 结束间的所有语句的原子性,即在这期间的所有语句全部成功,或者全部失败。用以保证你在应用开发时所需的数据一致性。 + +若你不清楚**乐观事务**是什么,请暂时不要使用它。因为使用**乐观事务**的前提是需要应用程序可以正确的处理 `COMMIT` 语句所返回的[所有错误](/error-codes.md)。如果不确定应用程序如何处理,请直接使用**悲观事务**。 + +## 应用程序与 TiDB 交互的方式 + +TiDB 高度兼容 MySQL 协议,TiDB 支持[大多数 MySQL 的语法及特性](/mysql-compatibility.md),因此大部分的 MySQL 的连接库都与 TiDB 兼容。如果你的应用程序框架或语言无 PingCAP 的官方适配,那么建议你使用 MySQL 的客户端库。同时,也有越来越多的三方数据库主动支持 TiDB 的差异特性。 + +因为 TiDB 兼容 MySQL 协议,且兼容 MySQL 语法,因此大多数支持 MySQL 的 ORM 也兼容 TiDB。 + +## 扩展阅读 + +- [快速开始](/develop/dev-guide-build-cluster-in-cloud.md) +- [选择驱动或 ORM 框架](/develop/dev-guide-choose-driver-or-orm.md) +- [连接到 TiDB](/develop/dev-guide-connect-to-tidb.md) +- [数据库模式设计](/develop/dev-guide-schema-design-overview.md) +- [数据写入](/develop/dev-guide-insert-data.md) +- [数据读取](/develop/dev-guide-get-data-from-single-table.md) +- [事务](/develop/dev-guide-transaction-overview.md) +- [优化 SQL 性能](/develop/dev-guide-optimize-sql-overview.md) +- [示例程序](/develop/dev-guide-sample-application-java-spring-boot.md) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md new file mode 100644 index 00000000..531143c2 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-playground-gitpod.md @@ -0,0 +1,169 @@ +--- +title: Gitpod +--- + + + +# Gitpod + +使用 
[Gitpod](https://www.gitpod.io/),只需单击一个按钮或链接即可在浏览器中获得完整的开发环境,并且可以立即编写代码。 + +Gitpod 是一个开源 Kubernetes 应用程序(GitHub 仓库地址 ),适用于可直接编写代码的开发环境,可为云中的每个任务提供全新的自动化开发环境,非常迅速。此外,Gitpod 能够将你的开发环境描述为代码,并直接从你的浏览器或桌面 IDE 启动即时、远程和基于云的开发环境。 + +## 快速开始 + +1. Fork 出 TiDB 应用开发的示例代码仓库 [pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java)。 + +2. 通过浏览器的地址栏,在示例代码仓库的 URL 前加上 `https://gitpod.io/#` 来启动你的 gitpod 工作区。 + + - 例如,`https://gitpod.io/#https://github.com/pingcap-inc/tidb-example-java`。 + + - 支持在 URL 中配置环境变量。例如,`https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java`。 + +3. 使用列出的提供商之一登录并启动工作区,例如,`Github`。 + +## 使用默认的 Gitpod 配置和环境 + +完成[快速开始](#快速开始) 的步骤之后,Gitpod 会需要一段时间来设置你的工作区。 + +以 [Spring Boot Web](/develop/dev-guide-sample-application-java-spring-boot.md) 应用程序为例,通过 URL `https://gitpod.io/#targetFile=spring-jpa-hibernate_Makefile,targetMode=spring-jpa-hibernate/https://github.com/pingcap-inc/tidb-example-java` 可以创建一个新工作区。 + +完成后,你将看到如下所示的页面。 + +![playground gitpod workspace init](/media/develop/playground-gitpod-workspace-init.png) + +页面中的这个场景使用了 [TiUP](https://docs.pingcap.com/zh/tidb/stable/tiup-overview) 来搭建一个 TiDB Playground。你可以在终端的左侧查看进度。 + +一旦 TiDB Playground 准备就绪,另一个 `Spring JPA Hibernate` 任务将运行。 你可以在终端的右侧查看进度。 + +完成所有任务后,你可以看到如下所示的页面,并在左侧导航栏的 `REMOTE EXPLORER` 中找到你的端口 `8080` URL(Gitpod 支持基于 URL 的端口转发)。 + +![playground gitpod workspace ready](/media/develop/playground-gitpod-workspace-ready.png) + +你可以按照[该指南](/develop/dev-guide-sample-application-java-spring-boot.md#第-6-步http-请求)测试 API。注意请将 URL `http://localhost:8080` 替换为你在 `REMOTE EXPLORER` 中找到的那个。 + +## 使用自定义的 Gitpod 配置和 Docker 镜像 + +### 自定义 Gitpod 配置 + +在项目的根目录中,参考[示例 .gitpod.yml](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.yml),创建一个 `.gitpod.yml` 文件用于配置 Gitpod 工作空间。 + +```yml +# This configuration file was automatically generated by Gitpod. 
+# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. + +# image: +# file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! ***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### 自定义 Gitpod Docker 镜像 + +默认情况下,Gitpod 使用名为 Workspace-Full 的标准 Docker 镜像作为工作空间的基础。 基于此默认镜像启动的工作区预装了 Docker、Go、Java、Node.js、C/C++、Python、Ruby、Rust、PHP 以及 Homebrew、Tailscale、Nginx 等工具。 + +你可以提供公共 Docker 镜像或 Dockerfile。 并为你的项目安装所需的任何依赖项。 + +这是一个 Dockerfile 示例:[示例 .gitpod.Dockerfile](https://github.com/pingcap-inc/tidb-example-java/blob/main/.gitpod.Dockerfile) + +```dockerfile +FROM gitpod/workspace-java-17 + +RUN sudo apt install mysql-client -y +RUN curl --proto '=https' --tlsv1.2 -sSf https://tiup-mirrors.pingcap.com/install.sh | sh +``` + +然后需要更新`.gitpod.yml`: + +```yml +# This configuration file was automatically generated by Gitpod. +# Please adjust to your needs (see https://www.gitpod.io/docs/config-gitpod-file) +# and commit this file to your remote git repository to share the goodness with others. 
+ +image: + # 在这里导入你的 Dockerfile + file: .gitpod.Dockerfile + +tasks: + - name: Open Target File + command: | + if [ -n "$targetFile" ]; then code ${targetFile//[_]//}; fi + - name: TiUP init playground + command: | + $HOME/.tiup/bin/tiup playground + - name: Test Case + openMode: split-right + init: echo "*** Waiting for TiUP Playground Ready! ***" + command: | + gp await-port 3930 + if [ "$targetMode" == "plain-java-jdbc" ] + then + cd plain-java-jdbc + code src/main/resources/dbinit.sql + code src/main/java/com/pingcap/JDBCExample.java + make mysql + elif [ "$targetMode" == "plain-java-hibernate" ] + then + cd plain-java-hibernate + make + elif [ "$targetMode" == "spring-jpa-hibernate" ] + then + cd spring-jpa-hibernate + make + fi +ports: + - port: 8080 + visibility: public + - port: 4000 + visibility: public + - port: 2379-36663 + onOpen: ignore +``` + +### 应用更改 + +完成对 `.gitpod.yml` 文件配置后,请保证最新的代码已在你对应的 GitHub 代码仓库中可用。 + +访问 `https://gitpod.io/#` 以建立新的 Gitpod 工作区,新工作区会应用最新的代码。 + +访问 `https://gitpod.io/workspaces` 以获取所有建立的工作区。 + +## 总结 + +Gitpod 提供了完整的、自动化的、预配置的云原生开发环境。无需本地配置,你可以直接在浏览器中开发、运行、测试代码。 + +![playground gitpod summary](/media/develop/playground-gitpod-summary.png) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md new file mode 100644 index 00000000..dada8dfd --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-prepared-statement.md @@ -0,0 +1,233 @@ +--- +title: 预处理语句 +summary: 介绍 TiDB 的预处理语句功能。 +aliases: ['/zh/tidb/dev/prepared-statement'] +--- + +# 预处理语句 + +[预处理语句](/sql-statements/sql-statement-prepare.md)是一种将多个仅有参数不同的 SQL 语句进行模板化的语句,它让 SQL 语句与参数进行了分离。可以用它提升 SQL 语句的: + +- 安全性:因为参数和语句已经分离,所以避免了 [SQL 注入攻击](https://en.wikipedia.org/wiki/SQL_injection)的风险。 +- 性能:因为语句在 TiDB 端被预先解析,后续执行只需要传递参数,节省了完整 SQL 解析、拼接 SQL 语句字符串以及网络传输的代价。 + +在大部分的应用程序中,SQL 
语句是可以被枚举的,可以使用有限个 SQL 语句来完成整个应用程序的数据查询,所以使用预处理语句是最佳实践之一。 + +## SQL 语法 + +本节将介绍创建、使用及删除预处理语句的 SQL 语法。 + +### 创建预处理语句 + +```sql +PREPARE {prepared_statement_name} FROM '{prepared_statement_sql}'; +``` + +| 参数 | 描述 | +| :-------------------------: | :------------------------------------: | +| `{prepared_statement_name}` | 预处理语句名称 | +| `{prepared_statement_sql}` | 预处理语句 SQL,以英文半角问号做占位符 | + +你可查看 [PREPARE 语句](/sql-statements/sql-statement-prepare.md) 获得更多信息。 + +### 使用预处理语句 + +预处理语句仅可使用用户变量作为参数,因此,需先使用 [SET 语句](/sql-statements/sql-statement-set-variable.md) 设置变量后,供 [EXECUTE 语句](/sql-statements/sql-statement-execute.md) 调用预处理语句。 + +```sql +SET @{parameter_name} = {parameter_value}; +EXECUTE {prepared_statement_name} USING @{parameter_name}; +``` + +| 参数 | 描述 | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{parameter_name}` | 用户参数名 | +| `{parameter_value}` | 用户参数值 | +| `{prepared_statement_name}` | 预处理语句名称,需和[创建预处理语句](#创建预处理语句)中定义的名称一致 | + +你可查看 [EXECUTE 语句](/sql-statements/sql-statement-execute.md) 获得更多信息。 + +### 删除预处理语句 + +```sql +DEALLOCATE PREPARE {prepared_statement_name}; +``` + +| 参数 | 描述 | +| :-------------------------: | :-------------------------------------------------------------------: | +| `{prepared_statement_name}` | 预处理语句名称,需和[创建预处理语句](#创建预处理语句)中定义的名称一致 | + +你可查看 [DEALLOCATE 语句](/sql-statements/sql-statement-deallocate.md) 获得更多信息。 + +## 例子 + +本节以使用预处理语句,完成查询数据和插入数据两个场景的示例。 + +### 查询示例 + +例如,需要查询 [Bookshop 应用](/develop/dev-guide-bookshop-schema-design.md#books-表) 中,`id` 为 1 的书籍信息。 + + + +
+ +使用 SQL 查询示例: + +```sql +PREPARE `books_query` FROM 'SELECT * FROM `books` WHERE `id` = ?'; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.01 sec) +``` + +```sql +SET @id = 1; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_query` USING @id; +``` + +运行结果为: + +``` ++---------+---------------------------------+--------+---------------------+-------+--------+ +| id | title | type | published_at | stock | price | ++---------+---------------------------------+--------+---------------------+-------+--------+ +| 1 | The Adventures of Pierce Wehner | Comics | 1904-06-06 20:46:25 | 586 | 411.66 | ++---------+---------------------------------+--------+---------------------+-------+--------+ +1 row in set (0.05 sec) +``` + +
+ +
+ +使用 Java 查询示例: + +```java +// ds is an entity of com.mysql.cj.jdbc.MysqlDataSource +try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM `books` WHERE `id` = ?"); + preparedStatement.setLong(1, 1); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.println("No books in the table with id 1"); + } else { + // got book's info, which id is 1 + System.out.println(res.getLong("id")); + System.out.println(res.getString("title")); + System.out.println(res.getString("type")); + } +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +
+ +
+ +### 插入示例 + +还是使用 [books 表](/develop/dev-guide-bookshop-schema-design.md#books-表) 为例,需要插入一个 `title` 为 `TiDB Developer Guide`, `type` 为 `Science & Technology`, `stock` 为 `100`, `price` 为 `0.0`, `published_at` 为 `插入的当前时间` 的书籍信息。需要注意的是,`books` 表的主键包含 `AUTO_RANDOM` 属性,无需指定它。如果你对插入数据还不了解,可以在[插入数据](/develop/dev-guide-insert-data.md)一节了解更多数据插入的相关信息。 + + + +
+ +使用 SQL 插入数据示例如下: + +```sql +PREPARE `books_insert` FROM 'INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);'; +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.03 sec) +``` + +```sql +SET @title = 'TiDB Developer Guide'; +SET @type = 'Science & Technology'; +SET @stock = 100; +SET @price = 0.0; +SET @published_at = NOW(); +``` + +运行结果为: + +``` +Query OK, 0 rows affected (0.04 sec) +``` + +```sql +EXECUTE `books_insert` USING @title, @type, @stock, @price, @published_at; +``` + +运行结果为: + +``` +Query OK, 1 row affected (0.03 sec) +``` + +
+ +
+ +使用 Java 插入数据示例如下: + +```java +try (Connection connection = ds.getConnection()) { + String sql = "INSERT INTO `books` (`title`, `type`, `stock`, `price`, `published_at`) VALUES (?, ?, ?, ?, ?);"; + PreparedStatement preparedStatement = connection.prepareStatement(sql); + + preparedStatement.setString(1, "TiDB Developer Guide"); + preparedStatement.setString(2, "Science & Technology"); + preparedStatement.setInt(3, 100); + preparedStatement.setBigDecimal(4, new BigDecimal("0.0")); + preparedStatement.setTimestamp(5, new Timestamp(Calendar.getInstance().getTimeInMillis())); + + preparedStatement.executeUpdate(); +} catch (SQLException e) { + e.printStackTrace(); +} +``` + +可以看到,JDBC 帮你管控了预处理语句的生命周期,而无需你在应用程序里手动使用预处理语句的创建、使用、删除等。但值得注意的是,因为 TiDB 兼容 MySQL 协议,在客户端使用 MySQL JDBC Driver 的过程中,其默认配置并非开启 **_服务端_** 的预处理语句选项,而是使用客户端的预处理语句。你需要关注以下配置项,来获得在 JDBC 下 TiDB 服务端预处理语句的支持,及在你的使用场景下的最佳配置: + +| 参数 | 作用 | 推荐场景 | 推荐配置 | +| :---------------------: | :-----------------------------------: | :--------------------------: | :----------------------: | +| `useServerPrepStmts` | 是否使用服务端开启预处理语句支持 | 在需要多次使用预处理语句时 | `true` | +| `cachePrepStmts` | 客户端是否缓存预处理语句 | `useServerPrepStmts=true` 时 | `true` | +| `prepStmtCacheSqlLimit` | 预处理语句最大大小(默认 256 字符) | 预处理语句大于 256 字符时 | 按实际预处理语句大小配置 | +| `prepStmtCacheSize` | 预处理语句最大缓存数量 (默认 25 条) | 预处理语句数量大于 25 条时 | 按实际预处理语句数量配置 | + +在此处给出一个较为的通用场景的 JDBC 连接字符串配置,以 Host: `127.0.0.1`,Port: `4000`,用户: `root`,密码: 空,默认数据库: `test`为例: + +``` +jdbc:mysql://127.0.0.1:4000/test?user=root&useConfigs=maxPerformance&useServerPrepStmts=true&prepStmtCacheSqlLimit=2048&prepStmtCacheSize=256&rewriteBatchedStatements=true&allowMultiQueries=true +``` + +你也可以查看[插入行](/develop/dev-guide-insert-data.md#插入行)一章,来查看是否需要在插入数据场景下更改其他 JDBC 的参数。 + +有关 Java 的完整示例,可参阅: + +- [TiDB 和 JDBC 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-jdbc.md) +- [TiDB 和 Hibernate 的简单 CRUD 应用程序](/develop/dev-guide-sample-application-java-hibernate.md) +- [使用 Spring Boot 构建 TiDB 
应用程序](/develop/dev-guide-sample-application-java-spring-boot.md) + +
+ +
diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md new file mode 100644 index 00000000..49f8d1c7 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-gorm.md @@ -0,0 +1,291 @@ +--- +title: TiDB 和 GORM 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 GORM 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 GORM 的简单 CRUD 应用程序 + +[GORM](https://gorm.io/) 为当前比较流行的 Golang 开源 ORM 之一。 + +本文档将展示如何使用 TiDB 和 GORM 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Golang 1.16 以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +此处将以 GORM v1.23.5 版本进行说明。 + +封装一个用于适配 TiDB 事务的工具包 [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util),编写以下代码备用: + +```go +package util + +import ( + "gorm.io/gorm" +) + +// TiDBGormBegin start a TiDB and Gorm transaction as a block. If no error is returned, the transaction will be committed. Otherwise, the transaction will be rolled back. +func TiDBGormBegin(db *gorm.DB, pessimistic bool, fc func(tx *gorm.DB) error) (err error) { + session := db.Session(&gorm.Session{}) + if session.Error != nil { + return session.Error + } + + if pessimistic { + session = session.Exec("set @@tidb_txn_mode=pessimistic") + } else { + session = session.Exec("set @@tidb_txn_mode=optimistic") + } + + if session.Error != nil { + return session.Error + } + return session.Transaction(fc) +} +``` + +进入目录 `gorm`: + +```shell +cd gorm +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── go.mod +├── go.sum +└── gorm.go +``` + +其中,`gorm.go` 是 `gorm` 这个示例程序的主体。使用 gorm 时,相较于 go-sql-driver/mysql,gorm 屏蔽了创建数据库连接时,不同数据库差异的细节,其还封装了大量的操作,如 AutoMigrate、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 是数据结构体,为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。相较于 go-sql-driver/mysql,gorm 的 `Player` 数据结构体为了给 gorm 提供更多的信息,加入了形如 `gorm:"primaryKey;type:VARCHAR(36);column:id"` 的注解,用来指示映射关系。 + +```go + +package main + +import ( + "fmt" + "math/rand" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" + + "gorm.io/driver/mysql" + "gorm.io/gorm" + "gorm.io/gorm/clause" + "gorm.io/gorm/logger" +) + +type Player struct { + ID string `gorm:"primaryKey;type:VARCHAR(36);column:id"` + Coins int `gorm:"column:coins"` + Goods int `gorm:"column:goods"` +} + +func (*Player) TableName() string { + return "player" +} + +func main() { + // 1. Configure the example database connection. + db := createDB() + + // AutoMigrate for player table + db.AutoMigrate(&Player{}) + + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) +} + +func tradeExample(db *gorm.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + player1 := &Player{ID: "1", Coins: 100} + player2 := &Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player1) + db.Clauses(clause.OnConflict{UpdateAll: true}).Create(player2) + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. 
+ fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func simpleExample(db *gorm.DB) { + // Create a player, who has a coin and a goods. + if err := db.Clauses(clause.OnConflict{UpdateAll: true}). + Create(&Player{ID: "test", Coins: 1, Goods: 1}).Error; err != nil { + panic(err) + } + + // Get a player. + var testPlayer Player + db.Find(&testPlayer, "id = ?", "test") + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + bulkInsertPlayers := make([]Player, 1919, 1919) + total, batch := 1919, 114 + for i := 0; i < total; i++ { + bulkInsertPlayers[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + if err := db.Session(&gorm.Session{Logger: db.Logger.LogMode(logger.Error)}). + CreateInBatches(bulkInsertPlayers, batch).Error; err != nil { + panic(err) + } + + // Count players amount. + playersCount := int64(0) + db.Model(&Player{}).Count(&playersCount) + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. + threePlayers := make([]Player, 3, 3) + db.Limit(3).Find(&threePlayers) + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func createDB() *gorm.DB { + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + db, err := gorm.Open(mysql.Open(dsn), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Info), + }) + if err != nil { + panic(err) + } + + return db +} + +func buyGoods(db *gorm.DB, sellID, buyID string, amount, price int) error { + return util.TiDBGormBegin(db, true, func(tx *gorm.DB) error { + var sellPlayer, buyPlayer Player + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). 
+ Find(&sellPlayer, "id = ?", sellID).Error; err != nil { + return err + } + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + if err := tx.Clauses(clause.Locking{Strength: "UPDATE"}). + Find(&buyPlayer, "id = ?", buyID).Error; err != nil { + return err + } + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateSQL := "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" + if err := tx.Exec(updateSQL, -amount, price, sellID).Error; err != nil { + return err + } + + if err := tx.Exec(updateSQL, amount, -price, buyID).Error; err != nil { + return err + } + + fmt.Println("\n[buyGoods]:\n 'trade success'") + return nil + }) +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `gorm.go` 内 `dsn` 参数值: + +```go +dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `mysql.RegisterTLSConfig` 和 `dsn` 更改为: + +```go +mysql.RegisterTLSConfig("register-tidb-tls", &tls.Config { + MinVersion: tls.VersionTLS12, + ServerName: "xxx.tidbcloud.com", +}) + +dsn := "2aEp24QWEDLqRFs.root:123456@tcp(xxx.tidbcloud.com:4000)/test?charset=utf8mb4&tls=register-tidb-tls" +``` + +### 第 3 步第 2 部分:运行 + +你可以分别运行 `make build` 和 `make run` 以运行此代码: + +```shell +make build # this command executes `go build -o bin/gorm-example` +make run # this command executes `./bin/gorm-example` +``` + +或者你也可以直接使用原生的命令: + +```shell +go build -o bin/gorm-example +./bin/gorm-example +``` + +再或者直接运行 `make all` 命令,这是 `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[GORM 预期输出](https://github.com/pingcap-inc/tidb-example-golang/blob/main/Expected-Output.md#gorm) diff --git 
a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md new file mode 100644 index 00000000..2adddfc7 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-golang-sql-driver.md @@ -0,0 +1,537 @@ +--- +title: TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-golang'] +--- + + + + +# TiDB 和 Go-MySQL-Driver 的简单 CRUD 应用程序 + +本文档将展示如何使用 TiDB 和 [Go-MySQL-Driver](https://github.com/go-sql-driver/mysql) 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Golang 1.16 以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-golang.git +``` + +进入目录 `sqldriver`: + +```shell +cd sqldriver +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── dao.go +├── go.mod +├── go.sum +├── sql +│   └── dbinit.sql +├── sql.go +└── sqldriver.go +``` + +其中,`dbinit.sql` 为数据表初始化语句: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`sqldriver.go` 是 `sqldriver` 这个示例程序的主体。与 GORM 对比,go-sql-driver/mysql 的实现方式并非最优体验。你需要自行编写错误处理逻辑,手动关闭 `*sql.Rows`,并且代码无法简单复用。这会使你的代码有些冗余。因为 TiDB 与 MySQL 协议兼容,因此,需要初始化一个 MySQL 协议的数据源 `db, err := sql.Open("mysql", dsn)`,以此连接到 TiDB。并在其后,调用 `dao.go` 中的一系列方法,用来管理数据对象,进行增删改查等操作。 + +```go +package main + +import ( + "database/sql" + "fmt" + + _ "github.com/go-sql-driver/mysql" +) + +func main() { + // 1. Configure the example database connection. + dsn := "root:@tcp(127.0.0.1:4000)/test?charset=utf8mb4" + openDB("mysql", dsn, func(db *sql.DB) { + // 2. Run some simple examples. + simpleExample(db) + + // 3. Explore more. + tradeExample(db) + }) +} + +func simpleExample(db *sql.DB) { + // Create a player, who has a coin and a goods. + err := createPlayer(db, Player{ID: "test", Coins: 1, Goods: 1}) + if err != nil { + panic(err) + } + + // Get a player. + testPlayer, err := getPlayer(db, "test") + if err != nil { + panic(err) + } + fmt.Printf("getPlayer: %+v\n", testPlayer) + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + + err = bulkInsertPlayers(db, randomPlayers(1919), 114) + if err != nil { + panic(err) + } + + // Count players amount. + playersCount, err := getCount(db) + if err != nil { + panic(err) + } + fmt.Printf("countPlayers: %d\n", playersCount) + + // Print 3 players. + threePlayers, err := getPlayerByLimit(db, 3) + if err != nil { + panic(err) + } + for index, player := range threePlayers { + fmt.Printf("print %d player: %+v\n", index+1, player) + } +} + +func tradeExample(db *sql.DB) { + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. 
+ player1 := Player{ID: "1", Coins: 100} + player2 := Player{ID: "2", Coins: 114514, Goods: 20} + + // Create two players "by hand", using the INSERT statement on the backend. + if err := createPlayer(db, player1); err != nil { + panic(err) + } + if err := createPlayer(db, player2); err != nil { + panic(err) + } + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + fmt.Println("\nbuyGoods:\n => this trade will fail") + if err := buyGoods(db, player2.ID, player1.ID, 10, 500); err == nil { + panic("there shouldn't be success") + } + + // So player 1 has to reduce the incoming quantity to two. + fmt.Println("\nbuyGoods:\n => this trade will success") + if err := buyGoods(db, player2.ID, player1.ID, 2, 100); err != nil { + panic(err) + } +} + +func openDB(driverName, dataSourceName string, runnable func(db *sql.DB)) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + defer db.Close() + + runnable(db) +} +``` + +随后,封装一个用于适配 TiDB 事务的工具包 [util](https://github.com/pingcap-inc/tidb-example-golang/tree/main/util),编写以下代码备用: + +```go +package util + +import ( + "context" + "database/sql" +) + +type TiDBSqlTx struct { + *sql.Tx + conn *sql.Conn + pessimistic bool +} + +func TiDBSqlBegin(db *sql.DB, pessimistic bool) (*TiDBSqlTx, error) { + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return nil, err + } + if pessimistic { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "pessimistic") + } else { + _, err = conn.ExecContext(ctx, "set @@tidb_txn_mode=?", "optimistic") + } + if err != nil { + return nil, err + } + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return nil, err + } + return &TiDBSqlTx{ + conn: conn, + Tx: tx, + pessimistic: pessimistic, + }, nil +} + +func (tx *TiDBSqlTx) Commit() error { + defer tx.conn.Close() + return tx.Tx.Commit() +} + +func (tx *TiDBSqlTx) Rollback() error { + defer tx.conn.Close() + return 
tx.Tx.Rollback() +} +``` + +在 `dao.go` 中定义一系列数据的操作方法,用来对提供数据的写入能力。这也是本例子中核心部分。 + +```go +package main + +import ( + "database/sql" + "fmt" + "math/rand" + "strings" + + "github.com/google/uuid" + "github.com/pingcap-inc/tidb-example-golang/util" +) + +type Player struct { + ID string + Coins int + Goods int +} + +// createPlayer create a player +func createPlayer(db *sql.DB, player Player) error { + _, err := db.Exec(CreatePlayerSQL, player.ID, player.Coins, player.Goods) + return err +} + +// getPlayer get a player +func getPlayer(db *sql.DB, id string) (Player, error) { + var player Player + + rows, err := db.Query(GetPlayerSQL, id) + if err != nil { + return player, err + } + defer rows.Close() + + if rows.Next() { + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + return player, nil + } else { + return player, err + } + } + + return player, fmt.Errorf("can not found player") +} + +// getPlayerByLimit get players by limit +func getPlayerByLimit(db *sql.DB, limit int) ([]Player, error) { + var players []Player + + rows, err := db.Query(GetPlayerByLimitSQL, limit) + if err != nil { + return players, err + } + defer rows.Close() + + for rows.Next() { + player := Player{} + err = rows.Scan(&player.ID, &player.Coins, &player.Goods) + if err == nil { + players = append(players, player) + } else { + return players, err + } + } + + return players, nil +} + +// bulk-insert players +func bulkInsertPlayers(db *sql.DB, players []Player, batchSize int) error { + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + stmt, err := tx.Prepare(buildBulkInsertSQL(batchSize)) + if err != nil { + return err + } + + defer stmt.Close() + + for len(players) > batchSize { + if _, err := stmt.Exec(playerToArgs(players[:batchSize])...); err != nil { + tx.Rollback() + return err + } + + players = players[batchSize:] + } + + if len(players) != 0 { + if _, err := tx.Exec(buildBulkInsertSQL(len(players)), playerToArgs(players)...); err != 
nil { + tx.Rollback() + return err + } + } + + if err := tx.Commit(); err != nil { + tx.Rollback() + return err + } + + return nil +} + +func getCount(db *sql.DB) (int, error) { + count := 0 + + rows, err := db.Query(GetCountSQL) + if err != nil { + return count, err + } + + defer rows.Close() + + if rows.Next() { + if err := rows.Scan(&count); err != nil { + return count, err + } + } + + return count, nil +} + +func buyGoods(db *sql.DB, sellID, buyID string, amount, price int) error { + var sellPlayer, buyPlayer Player + + tx, err := util.TiDBSqlBegin(db, true) + if err != nil { + return err + } + + buyExec := func() error { + stmt, err := tx.Prepare(GetPlayerWithLockSQL) + if err != nil { + return err + } + defer stmt.Close() + + sellRows, err := stmt.Query(sellID) + if err != nil { + return err + } + defer sellRows.Close() + + if sellRows.Next() { + if err := sellRows.Scan(&sellPlayer.ID, &sellPlayer.Coins, &sellPlayer.Goods); err != nil { + return err + } + } + sellRows.Close() + + if sellPlayer.ID != sellID || sellPlayer.Goods < amount { + return fmt.Errorf("sell player %s goods not enough", sellID) + } + + buyRows, err := stmt.Query(buyID) + if err != nil { + return err + } + defer buyRows.Close() + + if buyRows.Next() { + if err := buyRows.Scan(&buyPlayer.ID, &buyPlayer.Coins, &buyPlayer.Goods); err != nil { + return err + } + } + buyRows.Close() + + if buyPlayer.ID != buyID || buyPlayer.Coins < price { + return fmt.Errorf("buy player %s coins not enough", buyID) + } + + updateStmt, err := tx.Prepare(UpdatePlayerSQL) + if err != nil { + return err + } + defer updateStmt.Close() + + if _, err := updateStmt.Exec(-amount, price, sellID); err != nil { + return err + } + + if _, err := updateStmt.Exec(amount, -price, buyID); err != nil { + return err + } + + return nil + } + + err = buyExec() + if err == nil { + fmt.Println("\n[buyGoods]:\n 'trade success'") + tx.Commit() + } else { + tx.Rollback() + } + + return err +} + +func playerToArgs(players []Player) 
[]interface{} { + var args []interface{} + for _, player := range players { + args = append(args, player.ID, player.Coins, player.Goods) + } + return args +} + +func buildBulkInsertSQL(amount int) string { + return CreatePlayerSQL + strings.Repeat(",(?,?,?)", amount-1) +} + +func randomPlayers(amount int) []Player { + players := make([]Player, amount, amount) + for i := 0; i < amount; i++ { + players[i] = Player{ + ID: uuid.New().String(), + Coins: rand.Intn(10000), + Goods: rand.Intn(10000), + } + } + + return players +} +``` + +`sql.go` 中存放了 SQL 语句的常量。 + +```go +package main + +const ( + CreatePlayerSQL = "INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)" + GetPlayerSQL = "SELECT id, coins, goods FROM player WHERE id = ?" + GetCountSQL = "SELECT count(*) FROM player" + GetPlayerWithLockSQL = GetPlayerSQL + " FOR UPDATE" + UpdatePlayerSQL = "UPDATE player set goods = goods + ?, coins = coins + ? WHERE id = ?" + GetPlayerByLimitSQL = "SELECT id, coins, goods FROM player LIMIT ?" +) +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +使用 go-sql-driver/mysql 时,需手动初始化数据库表,若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `sqldriver` 目录下运行: + +```shell +make mysql +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + +# TiDB 和 Hibernate 的简单 CRUD 应用程序 + +[Hibernate](https://hibernate.org/) 是当前比较流行的开源 Java 应用持久层框架,且 Hibernate 在版本 `6.0.0.Beta2` 及以后支持了 TiDB 方言,完美适配了 TiDB 的特性。 + +本文档将展示如何使用 TiDB 和 Java 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 
集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +与 [Hibernate](https://hibernate.org/orm/) 对比,JDBC 的实现方式并非最优体验。你需要自行编写错误处理逻辑,并且代码无法简单复用。这会使你的代码有些冗余。 + +此处将以 `6.0.0.Beta2` 版本进行说明。 + +进入目录 `plain-java-hibernate`: + +```shell +cd plain-java-hibernate +``` + +目录结构如下所示: + +``` +. +├── Makefile +├── plain-java-hibernate.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── HibernateExample.java + └── resources + └── hibernate.cfg.xml +``` + +其中,`hibernate.cfg.xml` 为 Hibernate 配置文件,定义了: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +`HibernateExample.java` 是 `plain-java-hibernate` 这个示例程序的主体。使用 Hibernate 时,相较于 JDBC,这里仅需写入配置文件地址,Hibernate 屏蔽了创建数据库连接时,不同数据库差异的细节。 + +`PlayerDAO` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。其中定义了一系列数据的操作方法,用来提供数据的写入能力。相较于 JDBC,Hibernate 封装了大量的操作,如对象映射、基本对象的 CRUD 等,极大地简化了代码量。 + +`PlayerBean` 是数据实体类,为数据库表在程序内的映射。`PlayerBean` 的每个属性都对应着 `player` 表的一个字段。相较于 JDBC,Hibernate 的 `PlayerBean` 实体类为了给 Hibernate 提供更多的信息,加入了注解,用来指示映射关系。 + +```java +package com.pingcap; + +import jakarta.persistence.Column; +import jakarta.persistence.Entity; +import jakarta.persistence.Id; +import jakarta.persistence.Table; +import org.hibernate.JDBCException; +import org.hibernate.Session; +import org.hibernate.SessionFactory; +import org.hibernate.Transaction; +import org.hibernate.cfg.Configuration; +import org.hibernate.query.NativeQuery; +import org.hibernate.query.Query; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.function.Function; + +@Entity +@Table(name = "player_hibernate") +class PlayerBean { + @Id + private String id; + @Column(name = "coins") + 
private Integer coins; + @Column(name = "goods") + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } +} + +/** + * Main class for the basic Hibernate example. + **/ +public class HibernateExample +{ + public static class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that automatically handles the + // transaction retry logic so we don't have to duplicate it in + // various places. 
+ public Object runTransaction(Session session, Function fn) { + Object resultObject = null; + + Transaction txn = session.beginTransaction(); + try { + resultObject = fn.apply(session); + txn.commit(); + System.out.println("APP: COMMIT;"); + } catch (JDBCException e) { + System.out.println("APP: ROLLBACK BY JDBC ERROR;"); + txn.rollback(); + } catch (NotEnoughException e) { + System.out.printf("APP: ROLLBACK BY LOGIC; %s", e.getMessage()); + txn.rollback(); + } + return resultObject; + } + + public Function createPlayers(List players) throws JDBCException { + return session -> { + Integer addedPlayerAmount = 0; + for (PlayerBean player: players) { + session.persist(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) throws JDBCException { + return session -> { + PlayerBean sellPlayer = session.get(PlayerBean.class, sellId); + PlayerBean buyPlayer = session.get(PlayerBean.class, buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + session.persist(buyPlayer); + + sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + session.persist(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return 0; + }; + } + + public Function getPlayerByID(String id) throws JDBCException { + return session -> session.get(PlayerBean.class, id); + } + + public Function printPlayers(Integer limit) throws JDBCException { + return session -> { + 
NativeQuery limitQuery = session.createNativeQuery("SELECT * FROM player_hibernate LIMIT :limit", PlayerBean.class); + limitQuery.setParameter("limit", limit); + List players = limitQuery.getResultList(); + + for (PlayerBean player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() throws JDBCException { + return session -> { + Query countQuery = session.createQuery("SELECT count(player_hibernate) FROM PlayerBean player_hibernate", Long.class); + return countQuery.getSingleResult(); + }; + } + } + + public static void main(String[] args) { + // 1. Create a SessionFactory based on our hibernate.cfg.xml configuration + // file, which defines how to connect to the database. + SessionFactory sessionFactory + = new Configuration() + .configure("hibernate.cfg.xml") + .addAnnotatedClass(PlayerBean.class) + .buildSessionFactory(); + + try (Session session = sessionFactory.openSession()) { + // 2. And then, create DAO to manager your data. + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(session, playerDAO.createPlayers(Collections.singletonList( + new PlayerBean("test", 1, 1)))); + + // Get a player. + PlayerBean testPlayer = (PlayerBean)playerDAO.runTransaction(session, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Long count = (Long)playerDAO.runTransaction(session, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(session, playerDAO.printPlayers(3)); + + // 4. Explore more. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. 
+ PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(session, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } finally { + sessionFactory.close(); + } + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `hibernate.cfg.xml` 内关于 `hibernate.connection.url`、`hibernate.connection.username`、`hibernate.connection.password` 的参数: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://localhost:4000/test + root + + false + + + create-drop + + + true + true + + +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将配置文件更改为: + +```xml + + + + + + + com.mysql.cj.jdbc.Driver + org.hibernate.dialect.TiDBDialect + jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + 2aEp24QWEDLqRFs.root + 
123456 + false + + + create-drop + + + true + true + + +``` + +### 第 3 步第 2 部分:运行 + +你可以分别运行 `make build` 和 `make run` 以运行此代码: + +```shell +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-hibernate-0.0.1-jar-with-dependencies.jar` +``` + +或者你也可以直接使用原生的命令: + +```shell +mvn clean package +java -jar target/plain-java-hibernate-0.0.1-jar-with-dependencies.jar +``` + +再或者直接运行 `make` 命令,这是 `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[Hibernate 预期输出](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-hibernate) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md new file mode 100644 index 00000000..250943e0 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-jdbc.md @@ -0,0 +1,576 @@ +--- +title: TiDB 和 JDBC 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 JDBC 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 JDBC 的简单 CRUD 应用程序 + +本文档将展示如何使用 TiDB 和 JDBC 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +进入目录 `plain-java-jdbc`: + +```shell +cd plain-java-jdbc +``` + +目录结构如下所示: + 
+``` +. +├── Makefile +├── plain-java-jdbc.iml +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ └── JDBCExample.java + └── resources + └── dbinit.sql +``` + +其中,`dbinit.sql` 为数据表初始化语句: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +`JDBCExample.java` 是 `plain-java-jdbc` 这个示例程序的主体。因为 TiDB 与 MySQL 协议兼容,因此,需要初始化一个 MySQL 协议的数据源 `MysqlDataSource`,以此连接到 TiDB。并在其后,初始化 `PlayerDAO`,用来管理数据对象,进行增删改查等操作。 + +`PlayerDAO` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。在其中定义了一系列数据的操作方法,用来对提供数据的写入能力。 + +`PlayerBean` 是数据实体类,为数据库表在程序内的映射。`PlayerBean` 的每个属性都对应着 `player` 表的一个字段。 + +```java +package com.pingcap; + +import com.mysql.cj.jdbc.MysqlDataSource; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.util.*; + +/** + * Main class for the basic JDBC example. + **/ +public class JDBCExample +{ + public static class PlayerBean { + private String id; + private Integer coins; + private Integer goods; + + public PlayerBean() { + } + + public PlayerBean(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } + + @Override + public String toString() { + return String.format(" %-8s => %10s\n %-8s => %10s\n %-8s => %10s\n", + "id", this.id, "coins", this.coins, "goods", this.goods); + } + } + + /** + * Data access object used by 'ExampleDataSource'. + * Example for CURD and bulk insert. 
+ */ + public static class PlayerDAO { + private final MysqlDataSource ds; + private final Random rand = new Random(); + + PlayerDAO(MysqlDataSource ds) { + this.ds = ds; + } + + /** + * Create players by passing in a List of PlayerBean. + * + * @param players Will create players list + * @return The number of create accounts + */ + public int createPlayers(List players){ + int rows = 0; + + Connection connection = null; + PreparedStatement preparedStatement = null; + try { + connection = ds.getConnection(); + preparedStatement = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)"); + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + + return -1; + } + + try { + for (PlayerBean player : players) { + preparedStatement.setString(1, player.getId()); + preparedStatement.setInt(2, player.getCoins()); + preparedStatement.setInt(3, player.getGoods()); + + preparedStatement.execute(); + rows += preparedStatement.getUpdateCount(); + } + } catch (SQLException e) { + System.out.printf("[createPlayers] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + } finally { + try { + connection.close(); + } catch (SQLException e) { + e.printStackTrace(); + } + } + + System.out.printf("\n[createPlayers]:\n '%s'\n", preparedStatement); + return rows; + } + + /** + * Buy goods and transfer funds between one player and another in one transaction. + * @param sellId Sell player id. + * @param buyId Buy player id. + * @param amount Goods amount, if sell player has not enough goods, the trade will break. + * @param price Price should pay, if buy player has not enough coins, the trade will break. + * + * @return The number of effected players. 
+ */ + public int buyGoods(String sellId, String buyId, Integer amount, Integer price) { + int effectPlayers = 0; + + Connection connection = null; + try { + connection = ds.getConnection(); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + e.printStackTrace(); + return effectPlayers; + } + + try { + connection.setAutoCommit(false); + + PreparedStatement playerQuery = connection.prepareStatement("SELECT * FROM player WHERE id=? OR id=? FOR UPDATE"); + playerQuery.setString(1, sellId); + playerQuery.setString(2, buyId); + playerQuery.execute(); + + PlayerBean sellPlayer = null; + PlayerBean buyPlayer = null; + + ResultSet playerQueryResultSet = playerQuery.getResultSet(); + while (playerQueryResultSet.next()) { + PlayerBean player = new PlayerBean( + playerQueryResultSet.getString("id"), + playerQueryResultSet.getInt("coins"), + playerQueryResultSet.getInt("goods") + ); + + System.out.println("\n[buyGoods]:\n 'check goods and coins enough'"); + System.out.println(player); + + if (sellId.equals(player.getId())) { + sellPlayer = player; + } else { + buyPlayer = player; + } + } + + if (sellPlayer == null || buyPlayer == null) { + throw new SQLException("player not exist."); + } + + if (sellPlayer.getGoods().compareTo(amount) < 0) { + throw new SQLException(String.format("sell player %s goods not enough.", sellId)); + } + + if (buyPlayer.getCoins().compareTo(price) < 0) { + throw new SQLException(String.format("buy player %s coins not enough.", buyId)); + } + + PreparedStatement transfer = connection.prepareStatement("UPDATE player set goods = goods + ?, coins = coins + ? 
WHERE id=?"); + transfer.setInt(1, -amount); + transfer.setInt(2, price); + transfer.setString(3, sellId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + transfer.setInt(1, amount); + transfer.setInt(2, -price); + transfer.setString(3, buyId); + transfer.execute(); + effectPlayers += transfer.getUpdateCount(); + + connection.commit(); + + System.out.println("\n[buyGoods]:\n 'trade success'"); + } catch (SQLException e) { + System.out.printf("[buyGoods] ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + + try { + System.out.println("[buyGoods] Rollback"); + + connection.rollback(); + } catch (SQLException ex) { + // do nothing + } + } finally { + try { + connection.close(); + } catch (SQLException e) { + // do nothing + } + } + + return effectPlayers; + } + + /** + * Get the player info by id. + * + * @param id Player id. + * @return The player of this id. + */ + public PlayerBean getPlayer(String id) { + PlayerBean player = null; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player WHERE id = ?"); + preparedStatement.setString(1, id); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(!res.next()) { + System.out.printf("No players in the table with id %s", id); + } else { + player = new PlayerBean(res.getString("id"), res.getInt("coins"), res.getInt("goods")); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.getPlayer ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return player; + } + + /** + * Insert randomized account data (id, coins, goods) using the JDBC fast path for + * bulk inserts. The fastest way to get data into TiDB is using the + * TiDB Lightning(https://docs.pingcap.com/tidb/stable/tidb-lightning-overview). 
+ * However, if you must bulk insert from the application using INSERT SQL, the best + * option is the method shown here. It will require the following: + * + * Add `rewriteBatchedStatements=true` to your JDBC connection settings. + * Setting rewriteBatchedStatements to true now causes CallableStatements + * with batched arguments to be re-written in the form "CALL (...); CALL (...); ..." + * to send the batch in as few client/server round trips as possible. + * https://dev.mysql.com/doc/relnotes/connector-j/5.1/en/news-5-1-3.html + * + * You can see the `rewriteBatchedStatements` param effect logic at + * implement function: `com.mysql.cj.jdbc.StatementImpl.executeBatchUsingMultiQueries` + * + * @param total Add players amount. + * @param batchSize Bulk insert size for per batch. + * + * @return The number of new accounts inserted. + */ + public int bulkInsertRandomPlayers(Integer total, Integer batchSize) { + int totalNewPlayers = 0; + + try (Connection connection = ds.getConnection()) { + // We're managing the commit lifecycle ourselves, so we can + // control the size of our batch inserts. + connection.setAutoCommit(false); + + // In this example we are adding 500 rows to the database, + // but it could be any number. What's important is that + // the batch size is 128. 
+ try (PreparedStatement pstmt = connection.prepareStatement("INSERT INTO player (id, coins, goods) VALUES (?, ?, ?)")) { + for (int i=0; i<=(total/batchSize);i++) { + for (int j=0; j %s row(s) updated in this batch\n", count.length); + } + connection.commit(); + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.bulkInsertRandomPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + return totalNewPlayers; + } + + + /** + * Print a subset of players from the data store by limit. + * + * @param limit Print max size. + */ + public void printPlayers(Integer limit) { + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT * FROM player LIMIT ?"); + preparedStatement.setInt(1, limit); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + while (!res.next()) { + PlayerBean player = new PlayerBean(res.getString("id"), + res.getInt("coins"), res.getInt("goods")); + System.out.println("\n[printPlayers]:\n" + player); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.printPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + } + + + /** + * Count players from the data store. 
+ * + * @return All players count + */ + public int countPlayers() { + int count = 0; + + try (Connection connection = ds.getConnection()) { + PreparedStatement preparedStatement = connection.prepareStatement("SELECT count(*) FROM player"); + preparedStatement.execute(); + + ResultSet res = preparedStatement.executeQuery(); + if(res.next()) { + count = res.getInt(1); + } + } catch (SQLException e) { + System.out.printf("PlayerDAO.countPlayers ERROR: { state => %s, cause => %s, message => %s }\n", + e.getSQLState(), e.getCause(), e.getMessage()); + } + + return count; + } + } + + public static void main(String[] args) { + // 1. Configure the example database connection. + + // 1.1 Create a mysql data source instance. + MysqlDataSource mysqlDataSource = new MysqlDataSource(); + + // 1.2 Set server name, port, database name, username and password. + mysqlDataSource.setServerName("localhost"); + mysqlDataSource.setPortNumber(4000); + mysqlDataSource.setDatabaseName("test"); + mysqlDataSource.setUser("root"); + mysqlDataSource.setPassword(""); + + // Or you can use jdbc string instead. + // mysqlDataSource.setURL("jdbc:mysql://{host}:{port}/test?user={user}&password={password}"); + + // 2. And then, create DAO to manager your data. + PlayerDAO dao = new PlayerDAO(mysqlDataSource); + + // 3. Run some simple examples. + + // Create a player, who has a coin and a goods.. + dao.createPlayers(Collections.singletonList(new PlayerBean("test", 1, 1))); + + // Get a player. + PlayerBean testPlayer = dao.getPlayer("test"); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Create players with bulk inserts. Insert 1919 players totally, with 114 players per batch. + int addedCount = dao.bulkInsertRandomPlayers(1919, 114); + System.out.printf("PlayerDAO.bulkInsertRandomPlayers:\n => %d total inserted players\n", addedCount); + + // Count players amount. 
+ int count = dao.countPlayers(); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + dao.printPlayers(3); + + // 4. Explore more. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + PlayerBean player1 = new PlayerBean("1", 100, 0); + PlayerBean player2 = new PlayerBean("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + addedCount = dao.createPlayers(Arrays.asList(player1, player2)); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + int updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 10, 500); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = dao.buyGoods(player2.getId(), player1.getId(), 2, 100); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:JDBC 表初始化 + +使用 JDBC 时,需手动初始化数据库表,若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `plain-java-jdbc` 目录下运行: + +```shell +make mysql +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root + + +# TiDB 和 MyBatis 的简单 CRUD 应用程序 + +[Mybatis](https://mybatis.org/mybatis-3/index.html) 是当前比较流行的开源 Java 应用持久层框架。 + +本文档将展示如何使用 TiDB 和 MyBatis 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Java 8 及以上版本进行 TiDB 的应用程序的编写。 + +## 拓展学习视频 + +- [使用 Connector/J - TiDB v6](https://learn.pingcap.com/learner/course/840002/?utm_source=docs-cn-dev-guide) +- [在 TiDB 上开发应用的最佳实践 - TiDB v6](https://learn.pingcap.com/learner/course/780002/?utm_source=docs-cn-dev-guide) + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-java.git +``` + +与 [MyBatis](https://mybatis.org/mybatis-3/index.html) 对比,JDBC 的实现方式并非最优体验。你需要自行编写错误处理逻辑,并且代码无法简单复用。这会使你的代码有些冗余。 + +本文将以 Maven 插件的方式使用 [MyBatis Generator](https://mybatis.org/generator/quickstart.html) 生成部分持久层代码。 + +进入目录 `plain-java-mybatis`: + +```shell +cd plain-java-mybatis +``` + +目录结构如下所示: + +``` +. 
+├── Makefile +├── pom.xml +└── src + └── main + ├── java + │   └── com + │   └── pingcap + │   ├── MybatisExample.java + │   ├── dao + │   │   └── PlayerDAO.java + │   └── model + │   ├── Player.java + │   ├── PlayerMapper.java + │   └── PlayerMapperEx.java + └── resources + ├── dbinit.sql + ├── log4j.properties + ├── mapper + │   ├── PlayerMapper.xml + │   └── PlayerMapperEx.xml + ├── mybatis-config.xml + └── mybatis-generator.xml +``` + +其中,自动生成的文件有: + +- `src/main/java/com/pingcap/model/Player.java`:Player 实体类文件 +- `src/main/java/com/pingcap/model/PlayerMapper.java`:Player Mapper 的接口文件 +- `src/main/resources/mapper/PlayerMapper.xml`:Player Mapper 的 XML 映射,它是 MyBatis 用于生成 Player Mapper 接口的实现类的配置 + +这些文件的生成策略被写在了 `mybatis-generator.xml` 配置文件内,它是 [MyBatis Generator](https://mybatis.org/generator/quickstart.html) 的配置文件,下面配置文件中添加了使用方法的说明: + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + +
+ + +``` + +`mybatis-generator.xml` 在 `pom.xml` 中,以 `mybatis-generator-maven-plugin` 插件配置的方式被引入: + +```xml + + org.mybatis.generator + mybatis-generator-maven-plugin + 1.4.1 + + src/main/resources/mybatis-generator.xml + true + true + + + + + + mysql + mysql-connector-java + 5.1.49 + + + +``` + +在 Maven 插件内引入后,可删除旧的生成文件后,通过命令 `mvn mybatis-generate` 生成新的文件。或者你也可以使用已经编写好的 `make` 命令,通过 `make gen` 来同时删除旧文件,并生成新文件。 + +> **注意:** +> +> `mybatis-generator.xml` 中的属性 `configuration.overwrite` 仅可控制新生成的 Java 代码文件使用覆盖方式被写入,但 XML 映射文件仍会以追加方式写入。因此,推荐在 MyBatis Generator 生成新的文件前,先删除掉旧的文件。 + +`Player.java` 是使用 MyBatis Generator 生成出的数据实体类文件,为数据库表在程序内的映射。`Player` 类的每个属性都对应着 `player` 表的一个字段。 + +```java +package com.pingcap.model; + +public class Player { + private String id; + + private Integer coins; + + private Integer goods; + + public Player(String id, Integer coins, Integer goods) { + this.id = id; + this.coins = coins; + this.goods = goods; + } + + public Player() { + super(); + } + + public String getId() { + return id; + } + + public void setId(String id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +`PlayerMapper.java` 是使用 MyBatis Generator 生成出的映射接口文件,它仅规定了接口,接口的实现类是由 MyBatis 来通过 XML 或注解自动生成的: + +```java +package com.pingcap.model; + +import com.pingcap.model.Player; + +public interface PlayerMapper { + int deleteByPrimaryKey(String id); + + int insert(Player row); + + int insertSelective(Player row); + + Player selectByPrimaryKey(String id); + + int updateByPrimaryKeySelective(Player row); + + int updateByPrimaryKey(Player row); +} +``` + +`PlayerMapper.xml` 是使用 MyBatis Generator 生成出的映射 XML 文件,MyBatis 将使用这个文件自动生成 `PlayerMapper` 接口的实现类: + +```xml + + + + + + + + + + + + id, coins, goods + + + + delete from player + where id = #{id,jdbcType=VARCHAR} + + + 
insert into player (id, coins, goods + ) + values (#{id,jdbcType=VARCHAR}, #{coins,jdbcType=INTEGER}, #{goods,jdbcType=INTEGER} + ) + + + insert into player + + + id, + + + coins, + + + goods, + + + + + #{id,jdbcType=VARCHAR}, + + + #{coins,jdbcType=INTEGER}, + + + #{goods,jdbcType=INTEGER}, + + + + + update player + + + coins = #{coins,jdbcType=INTEGER}, + + + goods = #{goods,jdbcType=INTEGER}, + + + where id = #{id,jdbcType=VARCHAR} + + + update player + set coins = #{coins,jdbcType=INTEGER}, + goods = #{goods,jdbcType=INTEGER} + where id = #{id,jdbcType=VARCHAR} + + +``` + +由于 MyBatis Generator 需要逆向生成源码,因此,数据库中需先行有此表结构,可使用 `dbinit.sql` 生成表结构: + +```sql +USE test; +DROP TABLE IF EXISTS player; + +CREATE TABLE player ( + `id` VARCHAR(36), + `coins` INTEGER, + `goods` INTEGER, + PRIMARY KEY (`id`) +); +``` + +额外拆分接口 `PlayerMapperEx` 继承 `PlayerMapper`,并且编写与之匹配的 `PlayerMapperEx.xml`。避免直接更改 `PlayerMapper.java` 和 `PlayerMapper.xml`。这是为了规避 MyBatis Generator 的反复生成,影响到自行编写的代码。 + +在 `PlayerMapperEx.java` 中定义自行增加的接口: + +```java +package com.pingcap.model; + +import java.util.List; + +public interface PlayerMapperEx extends PlayerMapper { + Player selectByPrimaryKeyWithLock(String id); + + List selectByLimit(Integer limit); + + Integer count(); +} +``` + +在 `PlayerMapperEx.xml` 中定义映射规则: + +```xml + + + + + + + + + + + + id, coins, goods + + + + + + + + + +``` + +`PlayerDAO.java` 是程序用来管理数据对象的类。其中 `DAO` 是 [Data Access Object](https://en.wikipedia.org/wiki/Data_access_object) 的缩写。在其中定义了一系列数据的操作方法,用于数据的写入。 + +```java +package com.pingcap.dao; + +import com.pingcap.model.Player; +import com.pingcap.model.PlayerMapperEx; +import org.apache.ibatis.session.SqlSession; +import org.apache.ibatis.session.SqlSessionFactory; + +import java.util.List; +import java.util.function.Function; + +public class PlayerDAO { + public static class NotEnoughException extends RuntimeException { + public NotEnoughException(String message) { + super(message); + } + } + + // Run SQL code in a way that 
automatically handles the + // transaction retry logic, so we don't have to duplicate it in + // various places. + public Object runTransaction(SqlSessionFactory sessionFactory, Function fn) { + Object resultObject = null; + SqlSession session = null; + + try { + // open a session with autoCommit is false + session = sessionFactory.openSession(false); + + // get player mapper + PlayerMapperEx playerMapperEx = session.getMapper(PlayerMapperEx.class); + + resultObject = fn.apply(playerMapperEx); + session.commit(); + System.out.println("APP: COMMIT;"); + } catch (Exception e) { + if (e instanceof NotEnoughException) { + System.out.printf("APP: ROLLBACK BY LOGIC; \n%s\n", e.getMessage()); + } else { + System.out.printf("APP: ROLLBACK BY ERROR; \n%s\n", e.getMessage()); + } + + if (session != null) { + session.rollback(); + } + } finally { + if (session != null) { + session.close(); + } + } + + return resultObject; + } + + public Function createPlayers(List players) { + return playerMapperEx -> { + Integer addedPlayerAmount = 0; + for (Player player: players) { + playerMapperEx.insert(player); + addedPlayerAmount ++; + } + System.out.printf("APP: createPlayers() --> %d\n", addedPlayerAmount); + return addedPlayerAmount; + }; + } + + public Function buyGoods(String sellId, String buyId, Integer amount, Integer price) { + return playerMapperEx -> { + Player sellPlayer = playerMapperEx.selectByPrimaryKeyWithLock(sellId); + Player buyPlayer = playerMapperEx.selectByPrimaryKeyWithLock(buyId); + + if (buyPlayer == null || sellPlayer == null) { + throw new NotEnoughException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new NotEnoughException("coins or goods not enough, rollback"); + } + + int affectRows = 0; + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + affectRows += playerMapperEx.updateByPrimaryKey(buyPlayer); + + 
sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + affectRows += playerMapperEx.updateByPrimaryKey(sellPlayer); + + System.out.printf("APP: buyGoods --> sell: %s, buy: %s, amount: %d, price: %d\n", sellId, buyId, amount, price); + return affectRows; + }; + } + + public Function getPlayerByID(String id) { + return playerMapperEx -> playerMapperEx.selectByPrimaryKey(id); + } + + public Function printPlayers(Integer limit) { + return playerMapperEx -> { + List players = playerMapperEx.selectByLimit(limit); + + for (Player player: players) { + System.out.println("\n[printPlayers]:\n" + player); + } + return 0; + }; + } + + public Function countPlayers() { + return PlayerMapperEx::count; + } +} +``` + +`MybatisExample` 是 `plain-java-mybatis` 这个示例程序的主类。其中定义了入口函数: + +```java +package com.pingcap; + +import com.pingcap.dao.PlayerDAO; +import com.pingcap.model.Player; +import org.apache.ibatis.io.Resources; +import org.apache.ibatis.session.SqlSessionFactory; +import org.apache.ibatis.session.SqlSessionFactoryBuilder; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Arrays; +import java.util.Collections; + +public class MybatisExample { + public static void main( String[] args ) throws IOException { + // 1. Create a SqlSessionFactory based on our mybatis-config.xml configuration + // file, which defines how to connect to the database. + InputStream inputStream = Resources.getResourceAsStream("mybatis-config.xml"); + SqlSessionFactory sessionFactory = new SqlSessionFactoryBuilder().build(inputStream); + + // 2. And then, create DAO to manager your data + PlayerDAO playerDAO = new PlayerDAO(); + + // 3. Run some simple examples. + + // Create a player who has 1 coin and 1 goods. + playerDAO.runTransaction(sessionFactory, playerDAO.createPlayers( + Collections.singletonList(new Player("test", 1, 1)))); + + // Get a player. 
+ Player testPlayer = (Player)playerDAO.runTransaction(sessionFactory, playerDAO.getPlayerByID("test")); + System.out.printf("PlayerDAO.getPlayer:\n => id: %s\n => coins: %s\n => goods: %s\n", + testPlayer.getId(), testPlayer.getCoins(), testPlayer.getGoods()); + + // Count players amount. + Integer count = (Integer)playerDAO.runTransaction(sessionFactory, playerDAO.countPlayers()); + System.out.printf("PlayerDAO.countPlayers:\n => %d total players\n", count); + + // Print 3 players. + playerDAO.runTransaction(sessionFactory, playerDAO.printPlayers(3)); + + // 4. Getting further. + + // Player 1: id is "1", has only 100 coins. + // Player 2: id is "2", has 114514 coins, and 20 goods. + Player player1 = new Player("1", 100, 0); + Player player2 = new Player("2", 114514, 20); + + // Create two players "by hand", using the INSERT statement on the backend. + int addedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.createPlayers(Arrays.asList(player1, player2))); + System.out.printf("PlayerDAO.createPlayers:\n => %d total inserted players\n", addedCount); + + // Player 1 wants to buy 10 goods from player 2. + // It will cost 500 coins, but player 1 cannot afford it. + System.out.println("\nPlayerDAO.buyGoods:\n => this trade will fail"); + Integer updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 10, 500)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + + // So player 1 has to reduce the incoming quantity to two. 
+ System.out.println("\nPlayerDAO.buyGoods:\n => this trade will success"); + updatedCount = (Integer)playerDAO.runTransaction(sessionFactory, + playerDAO.buyGoods(player2.getId(), player1.getId(), 2, 100)); + System.out.printf("PlayerDAO.buyGoods:\n => %d total update players\n", updatedCount); + } +} +``` + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +使用 MyBatis 时,需手动初始化数据库表。若你本地已经安装了 `mysql-client`,且使用本地集群,可直接在 `plain-java-mybatis` 目录下通过 `make prepare` 运行: + +```shell +make prepare +``` + +或直接执行: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +``` + +若你不使用本地集群,或未安装 `mysql-client`,请直接登录你的集群,并运行 `src/main/resources/dbinit.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `mybatis-config.xml` 内关于 `dataSource.url`、`dataSource.username`、`dataSource.password` 的参数: + +```xml + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将配置文件中 `dataSource` 节点内更改为: + +```xml + + + + + ... + + + + + + + + ... 
+ + +``` + +### 第 3 步第 3 部分:运行 + +你可以分别运行 `make prepare`, `make gen`, `make build` 和 `make run` 以运行此代码: + +```shell +make prepare +# this command executes : +# - `mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql` +# - `mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player"` + +make gen +# this command executes : +# - `rm -f src/main/java/com/pingcap/model/Player.java` +# - `rm -f src/main/java/com/pingcap/model/PlayerMapper.java` +# - `rm -f src/main/resources/mapper/PlayerMapper.xml` +# - `mvn mybatis-generator:generate` + +make build # this command executes `mvn clean package` +make run # this command executes `java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar` +``` + +或者你也可以直接使用原生的命令: + +```shell +mysql --host 127.0.0.1 --port 4000 -u root < src/main/resources/dbinit.sql +mysql --host 127.0.0.1 --port 4000 -u root -e "TRUNCATE test.player" +rm -f src/main/java/com/pingcap/model/Player.java +rm -f src/main/java/com/pingcap/model/PlayerMapper.java +rm -f src/main/resources/mapper/PlayerMapper.xml +mvn mybatis-generator:generate +mvn clean package +java -jar target/plain-java-mybatis-0.0.1-jar-with-dependencies.jar +``` + +再或者直接运行 `make` 命令,这是 `make prepare`, `make gen`, `make build` 和 `make run` 的组合。 + +## 第 4 步:预期输出 + +[MyBatis 预期输出](https://github.com/pingcap-inc/tidb-example-java/blob/main/Expected-Output.md#plain-java-mybatis) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md new file mode 100644 index 00000000..f823f4e8 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-java-spring-boot.md @@ -0,0 +1,1019 @@ +--- +title: 使用 Spring Boot 构建 TiDB 应用程序 +summary: 给出一个 Spring Boot 构建 TiDB 应用程序示例。 +aliases: 
['/zh/tidb/dev/dev-guide-sample-application-spring-boot', '/zh/tidb/dev/sample-application-spring-boot'] +--- + + + +# 使用 Spring Boot 构建 TiDB 应用程序 + +本教程向你展示如何使用 TiDB 构建 [Spring Boot](https://spring.io/projects/spring-boot) Web 应用程序。使用 [Spring Data JPA](https://spring.io/projects/spring-data-jpa) 模块作为数据访问能力的框架。此示例应用程序的代码仓库可在 [Github](https://github.com/pingcap-inc/tidb-example-java) 下载。 + +这是一个较为完整的构建 Restful API 的示例应用程序,展示了一个使用 **TiDB** 作为数据库的通用 **Spring Boot** 后端服务。设计了以下过程,用于还原一个现实场景: + +这是一个关于游戏的例子,每个玩家有两个属性:金币数 `coins` 和货物数 `goods`。且每个玩家都拥有一个字段 `id`,作为玩家的唯一标识。玩家在金币数和货物数充足的情况下,可以自由的交易。 + +你可以以此示例为基础,构建自己的应用程序。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:安装 JDK + +请在你的计算机上下载并安装 **Java Development Kit** (JDK),这是 Java 开发的必备工具。**Spring Boot** 支持 Java 版本 8 以上的 JDK,由于 **Hibernate** 版本的缘故,推荐使用 Java 版本 11 以上的 JDK。 + +示例应用程序同时支持 **Oracle JDK** 和 **OpenJDK**,请自行选择,本教程将使用版本 17 的 **OpenJDK**。 + +## 第 3 步:安装 Maven + +此示例应用程序使用 **Maven** 来管理应用程序的依赖项。Spring 支持的 **Maven** 版本为 3.2 以上,作为依赖管理软件,推荐使用当前最新稳定版本的 **Maven**。 + +这里给出命令行安装 **Maven** 的办法: + +- macOS 安装: + + {{< copyable "shell-regular" >}} + + ``` + brew install maven + ``` + +- 基于 Debian 的 Linux 发行版上安装(如 Ubuntu 等): + + {{< copyable "shell-regular" >}} + + ``` + apt-get install maven + ``` + +- 基于 Red Hat 的 Linux 发行版上安装(如 Fedora、CentOS 等): + +- dnf 包管理器 + + {{< copyable "shell-regular" >}} + + ``` + dnf install maven + ``` + +- yum 包管理器 + + {{< copyable "shell-regular" >}} + + ``` + yum install maven + ``` + +其他安装方法,请参考 [Maven 官方文档](https://maven.apache.org/install.html)。 + +## 第 4 步:获取应用程序代码 + +> **建议:** +> +> 如果你希望得到一个与本示例相同依赖的空白程序,而无需示例代码,可参考[创建相同依赖空白程序(可选)](#创建相同依赖空白程序可选)一节。 + +请下载或克隆示例代码库 
[pingcap-inc/tidb-example-java](https://github.com/pingcap-inc/tidb-example-java),并进入到目录 `spring-jpa-hibernate` 中。 + +## 第 5 步:运行应用程序 + +接下来运行应用程序代码,将会生成一个 Web 应用程序。Hibernate 将在数据库 `test` 中创建一个表 `player_jpa`。如果你向应用程序的 Restful API 发送请求,这些请求将会在 TiDB 集群上运行[数据库事务](/develop/dev-guide-transaction-overview.md)。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +### 第 5 步第 1 部分:TiDB Cloud 更改参数 + +若你使用 TiDB Serverless 集群,更改 `application.yml`(位于 `src/main/resources` 内)关于 `spring.datasource.url`、`spring.datasource.username`、`spring.datasource.password` 的参数: + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将参数更改为: + +```yaml +spring: + datasource: + url: jdbc:mysql://xxx.tidbcloud.com:4000/test?sslMode=VERIFY_IDENTITY&enabledTLSProtocols=TLSv1.2,TLSv1.3 + username: 2aEp24QWEDLqRFs.root + password: 123456 + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +### 第 5 步第 2 部分:运行 + +打开终端,进入 `tidb-example-java/spring-jpa-hibernate` 代码示例目录: + +```shell +cd /tidb-example-java/spring-jpa-hibernate +``` + +#### 使用 Make 构建并运行(推荐) + +```shell +make +``` + +#### 手动构建并运行 + +推荐你使用 Make 方式进行构建并运行,当然,若你希望手动进行构建,请依照以下步骤逐步运行,可以得到相同的结果: + +清除缓存并打包: + +```shell +mvn clean package +``` + +运行应用程序的 JAR 文件: + +```shell +java -jar target/spring-jpa-hibernate-0.0.1.jar +``` + +### 第 5 步第 3 部分:输出 + +输出的最后部分应如下所示: + +``` + . 
____ _ __ _ _ + /\\ / ___'_ __ _ _(_)_ __ __ _ \ \ \ \ +( ( )\___ | '_ | '_| | '_ \/ _` | \ \ \ \ + \\/ ___)| |_)| | | | | || (_| | ) ) ) ) + ' |____| .__|_| |_|_| |_\__, | / / / / + =========|_|==============|___/=/_/_/_/ + :: Spring Boot :: (v3.0.1) + +2023-01-05T14:06:54.427+08:00 INFO 22005 --- [ main] com.pingcap.App : Starting App using Java 17.0.2 with PID 22005 (/Users/cheese/IdeaProjects/tidb-example-java/spring-jpa-hibernate/target/classes started by cheese in /Users/cheese/IdeaProjects/tidb-example-java) +2023-01-05T14:06:54.428+08:00 INFO 22005 --- [ main] com.pingcap.App : No active profile set, falling back to 1 default profile: "default" +2023-01-05T14:06:54.642+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Bootstrapping Spring Data JPA repositories in DEFAULT mode. +2023-01-05T14:06:54.662+08:00 INFO 22005 --- [ main] .s.d.r.c.RepositoryConfigurationDelegate : Finished Spring Data repository scanning in 17 ms. Found 1 JPA repository interfaces. +2023-01-05T14:06:54.830+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat initialized with port(s): 8080 (http) +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardService : Starting service [Tomcat] +2023-01-05T14:06:54.833+08:00 INFO 22005 --- [ main] o.apache.catalina.core.StandardEngine : Starting Servlet engine: [Apache Tomcat/10.1.4] +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] o.a.c.c.C.[Tomcat].[localhost].[/] : Initializing Spring embedded WebApplicationContext +2023-01-05T14:06:54.865+08:00 INFO 22005 --- [ main] w.s.c.ServletWebServerApplicationContext : Root WebApplicationContext: initialization completed in 421 ms +2023-01-05T14:06:54.916+08:00 INFO 22005 --- [ main] o.hibernate.jpa.internal.util.LogHelper : HHH000204: Processing PersistenceUnitInfo [name: default] +2023-01-05T14:06:54.929+08:00 INFO 22005 --- [ main] org.hibernate.Version : HHH000412: Hibernate ORM core version 6.1.6.Final 
+2023-01-05T14:06:54.969+08:00 WARN 22005 --- [ main] org.hibernate.orm.deprecation : HHH90000021: Encountered deprecated setting [javax.persistence.sharedCache.mode], use [jakarta.persistence.sharedCache.mode] instead +2023-01-05T14:06:55.005+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Starting... +2023-01-05T14:06:55.074+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.pool.HikariPool : HikariPool-1 - Added connection com.mysql.cj.jdbc.ConnectionImpl@5e905f2c +2023-01-05T14:06:55.075+08:00 INFO 22005 --- [ main] com.zaxxer.hikari.HikariDataSource : HikariPool-1 - Start completed. +2023-01-05T14:06:55.089+08:00 INFO 22005 --- [ main] SQL dialect : HHH000400: Using dialect: org.hibernate.dialect.TiDBDialect +Hibernate: drop table if exists player_jpa +Hibernate: drop sequence player_jpa_id_seq +Hibernate: create sequence player_jpa_id_seq start with 1 increment by 1 +Hibernate: create table player_jpa (id bigint not null, coins integer, goods integer, primary key (id)) engine=InnoDB +2023-01-05T14:06:55.332+08:00 INFO 22005 --- [ main] o.h.e.t.j.p.i.JtaPlatformInitiator : HHH000490: Using JtaPlatform implementation: [org.hibernate.engine.transaction.jta.platform.internal.NoJtaPlatform] +2023-01-05T14:06:55.335+08:00 INFO 22005 --- [ main] j.LocalContainerEntityManagerFactoryBean : Initialized JPA EntityManagerFactory for persistence unit 'default' +2023-01-05T14:06:55.579+08:00 WARN 22005 --- [ main] JpaBaseConfiguration$JpaWebConfiguration : spring.jpa.open-in-view is enabled by default. Therefore, database queries may be performed during view rendering. 
Explicitly configure spring.jpa.open-in-view to disable this warning +2023-01-05T14:06:55.710+08:00 INFO 22005 --- [ main] o.s.b.w.embedded.tomcat.TomcatWebServer : Tomcat started on port(s): 8080 (http) with context path '' +2023-01-05T14:06:55.714+08:00 INFO 22005 --- [ main] com.pingcap.App : Started App in 1.432 seconds (process running for 1.654) +``` + +输出日志中,提示应用程序在启动过程中做了什么,这里显示应用程序使用 [Tomcat](https://tomcat.apache.org/) 启动了一个 **Servlet**,使用 Hibernate 作为 ORM,[HikariCP](https://github.com/brettwooldridge/HikariCP) 作为数据库连接池的实现,使用了 `org.hibernate.dialect.TiDBDialect` 作为数据库方言。启动后,Hibernate 删除并重新创建了表 `player_jpa`,及序列 `player_jpa_id_seq`。在启动的最后,监听了 8080 端口,对外提供 HTTP 服务。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅本教程下方的[实现细节](#实现细节)。 + +## 第 6 步:HTTP 请求 + +在运行应用程序后,你可以通过访问根地址 `http://localhost:8000` 向后端程序发送 HTTP 请求。下面将给出一些示例请求来演示如何使用该服务。 + + + +
+ +1. 将配置文件 [`Player.postman_collection.json`](https://raw.githubusercontent.com/pingcap-inc/tidb-example-python/main/django_example/Player.postman_collection.json) 导入 [Postman](https://www.postman.com/)。 + +2. 导入后 **Collections** > **Player** 如图所示: + + ![postman import](/media/develop/postman_player_import.png) + +3. 发送请求: + + - 增加玩家 + + 点击 **Create** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/` 请求。返回值为增加的玩家个数,预期为 1。 + + ![Postman-Create](/media/develop/postman_player_create.png) + + - 使用 ID 获取玩家信息 + + 点击 **GetByID** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/1` 请求。返回值为 ID 为 1 的玩家信息。 + + ![Postman-GetByID](/media/develop/postman_player_getbyid.png) + + - 使用 Limit 批量获取玩家信息 + + 点击 **GetByLimit** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/limit/3` 请求。返回值为最多 3 个玩家的信息列表。 + + ![Postman-GetByLimit](/media/develop/postman_player_getbylimit.png) + + - 分页获取玩家信息 + + 点击 **GetByPage** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8080/player/page?index=0&size=2` 请求。返回值为 index 为 0 的页,每页有 2 个玩家信息列表。此外,还包含了分页信息,如偏移量、总页数、是否排序等。 + + ![Postman-GetByPage](/media/develop//postman_player_getbypage.png) + + - 获取玩家个数 + + 点击 **Count** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/count` 请求。返回值为玩家个数。 + + ![Postman-Count](/media/develop/postman_player_count.png) + + - 玩家交易 + + 点击 **Trade** 标签,点击 **Send** 按钮,发送 `PUT` 形式的 `http://localhost:8000/player/trade` 请求。请求参数为售卖玩家 ID `sellID`、购买玩家 ID `buyID`、购买货物数量 `amount` 以及购买消耗金币数 `price`。返回值为交易是否成功。当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + + ![Postman-Trade](/media/develop/postman_player_trade.png) + +
+ +
+ +下面使用 curl 请求服务端。 + +- 增加玩家 + + 使用 `POST` 方法向 `/player` 端点发送请求来增加玩家,例如: + + ```shell + curl --location --request POST 'http://localhost:8080/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' + ``` + + 这里使用 JSON 作为信息的载荷。表示需要创建一个金币数 `coins` 为 100,货物数 `goods` 为 20 的玩家。返回值为创建的玩家信息: + + ```json + 1 + ``` + +- 使用 ID 获取玩家信息 + + 使用 `GET` 方法向 `/player` 端点发送请求来获取玩家信息。此外,还需要在路径上给出玩家的 ID 参数,即 `/player/{id}`。例如,在请求 ID 为 1 的玩家时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/1' + ``` + + 返回值为 ID 为 1 的玩家的信息: + + ```json + { + "coins": 200, + "goods": 10, + "id": 1 + } + ``` + +- 使用 Limit 批量获取玩家信息 + + 使用 `GET` 方法向 `/player/limit` 端点发送请求来获取玩家信息。此外,还需要在路径上给出限制查询的玩家信息的总数,即 `/player/limit/{limit}`。例如,在请求最多 3 个玩家的信息时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/limit/3' + ``` + + 返回值为玩家信息的列表: + + ```json + [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } + ] + ``` + +- 分页获取玩家信息 + + 使用 `GET` 方法向 `/player/page` 端点发送请求来分页获取玩家信息。额外地需要使用 URL 参数,例如在请求页面序号 `index` 为 0,每页最大请求量 `size` 为 2 时: + + ```shell + curl --location --request GET 'http://localhost:8080/player/page?index=0&size=2' + ``` + + 返回值为 `index` 为 0 的页,每页有 2 个玩家信息列表。此外,还包含了分页信息,如偏移量、总页数、是否排序等。 + + ```json + { + "content": [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + } + ], + "empty": false, + "first": true, + "last": false, + "number": 0, + "numberOfElements": 2, + "pageable": { + "offset": 0, + "pageNumber": 0, + "pageSize": 2, + "paged": true, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "unpaged": false + }, + "size": 2, + "sort": { + "empty": true, + "sorted": false, + "unsorted": true + }, + "totalElements": 4, + "totalPages": 2 + } + ``` + +- 获取玩家个数 + + 使用 `GET` 方法向 `/player/count` 端点发送请求来获取玩家个数: + + ```shell + curl --location --request GET 
'http://localhost:8080/player/count' + ``` + + 返回值为玩家个数: + + ```json + 4 + ``` + +- 玩家交易 + + 使用 `PUT` 方法向 `/player/trade` 端点发送请求来发起玩家间的交易,例如: + + ```shell + curl --location --request PUT 'http://localhost:8080/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' + ``` + + 这里使用 Form Data 作为信息的载荷。表示售卖玩家 ID `sellID` 为 1、购买玩家 ID `buyID` 为 2、购买货物数量 `amount` 为 10、购买消耗金币数 `price` 为 100。 + + 返回值为交易是否成功: + + ``` + true + ``` + + 当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +为方便测试,你可以使用 [`request.sh`](https://github.com/pingcap-inc/tidb-example-java/blob/main/spring-jpa-hibernate/request.sh) 脚本依次发送以下请求: + +1. 循环创建 10 名玩家 +2. 获取 ID 为 1 的玩家信息 +3. 获取至多 3 名玩家信息列表 +4. 获取 `index` 为 0,`size` 为 2 的一页玩家信息 +5. 获取玩家总数 +6. ID 为 1 的玩家作为售出方,ID 为 2 的玩家作为购买方,购买 10 个货物,耗费 100 金币 + +使用 `make request` 或 `./request.sh` 命令运行此脚本,运行结果如下所示: + +```shell +> make request +./request.sh +loop to create 10 players: +1111111111 + +get player 1: +{"id":1,"coins":200,"goods":10} + +get players by limit 3: +[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30},{"id":3,"coins":100,"goods":20}] + +get first players: +{"content":[{"id":1,"coins":200,"goods":10},{"id":2,"coins":0,"goods":30}],"pageable":{"sort":{"empty":true,"unsorted":true,"sorted":false},"offset":0,"pageNumber":0,"pageSize":2,"paged":true,"unpaged":false},"last":false,"totalPages":7,"totalElements":14,"first":true,"size":2,"number":0,"sort":{"empty":true,"unsorted":true,"sorted":false},"numberOfElements":2,"empty":false} + +get players count: +14 + +trade by two players: +false +``` + +
+ +
+ +## 实现细节 + +本小节介绍示例应用程序项目中的组件。 + +### 总览 + +本示例项目的大致目录树如下所示(删除了有碍理解的部分): + +``` +. +├── pom.xml +└── src + └── main + ├── java + │ └── com + │ └── pingcap + │ ├── App.java + │ ├── controller + │ │ └── PlayerController.java + │ ├── dao + │ │ ├── PlayerBean.java + │ │ └── PlayerRepository.java + │ └── service + │ ├── PlayerService.java + │ └── impl + │ └── PlayerServiceImpl.java + └── resources + └── application.yml +``` + +其中: + +- `pom.xml` 内声明了项目的 Maven 配置,如依赖,打包等 +- `application.yml` 内声明了项目的用户配置,如数据库地址、密码、使用的数据库方言等 +- `App.java` 是项目的入口 +- `controller` 是项目对外暴露 HTTP 接口的包 +- `service` 是项目实现接口与逻辑的包 +- `dao` 是项目实现与数据库连接并完成数据持久化的包 + +### 配置 + +本节将简要介绍 `pom.xml` 文件中的 Maven 配置,及 `application.yml` 文件中的用户配置。 + +#### Maven 配置 + +`pom.xml` 文件为 Maven 配置,在文件内声明了项目的 Maven 依赖,打包方法,打包信息等,你可以通过[创建相同依赖空白程序](#创建相同依赖空白程序可选) 这一节来复刻此配置文件的生成流程,当然,也可直接复制至你的项目来使用。 + +```xml + + + 4.0.0 + + org.springframework.boot + spring-boot-starter-parent + 3.0.1 + + + + com.pingcap + spring-jpa-hibernate + 0.0.1 + spring-jpa-hibernate + an example for spring boot, jpa, hibernate and TiDB + + + 17 + 17 + 17 + + + + + org.springframework.boot + spring-boot-starter-data-jpa + + + + org.springframework.boot + spring-boot-starter-web + + + + mysql + mysql-connector-java + runtime + + + + org.springframework.boot + spring-boot-starter-test + test + + + + + + + org.springframework.boot + spring-boot-maven-plugin + + + + +``` + +#### 用户配置 + +`application.yml` 此配置文件声明了用户配置,如数据库地址、密码、使用的数据库方言等。 + +```yaml +spring: + datasource: + url: jdbc:mysql://localhost:4000/test + username: root + # password: xxx + driver-class-name: com.mysql.cj.jdbc.Driver + jpa: + show-sql: true + database-platform: org.hibernate.dialect.TiDBDialect + hibernate: + ddl-auto: create-drop +``` + +此配置格式为 [YAML](https://yaml.org/) 格式。其中: + +- `spring.datasource.url`:数据库连接的 URL。 +- `spring.datasource.url`:数据库用户名。 +- `spring.datasource.password`:数据库密码,此项为空,需注释或删除。 +- `spring.datasource.driver-class-name`:数据库驱动,因为 TiDB 与 MySQL 兼容,则此处使用与 
mysql-connector-java 适配的驱动类 `com.mysql.cj.jdbc.Driver`。 +- `jpa.show-sql`:为 true 时将打印 JPA 运行的 SQL。 +- `jpa.database-platform`:选用的数据库方言,此处连接了 TiDB,自然选择 TiDB 方言,注意,此方言在 6.0.0.Beta2 版本后的 Hibernate 中才可选择,请注意依赖版本。 +- `jpa.hibernate.ddl-auto`:此处选择的 create-drop 将会在程序开始时创建表,退出时删除表。请勿在正式环境使用,但此处为示例程序,希望尽量不影响数据库数据,因此选择了此选项。 + +### 入口文件 + +入口文件 `App.java`: + +```java +package com.pingcap; + +import org.springframework.boot.SpringApplication; +import org.springframework.boot.autoconfigure.SpringBootApplication; +import org.springframework.boot.context.ApplicationPidFileWriter; + +@SpringBootApplication +public class App { + public static void main(String[] args) { + SpringApplication springApplication = new SpringApplication(App.class); + springApplication.addListeners(new ApplicationPidFileWriter("spring-jpa-hibernate.pid")); + springApplication.run(args); + } +} +``` + +入口类比较简单,首先,有一个 Spring Boot 应用程序的标准配置注解 [@SpringBootApplication](https://docs.spring.io/spring-boot/docs/current/api/org/springframework/boot/autoconfigure/SpringBootApplication.html)。有关详细信息,请参阅 Spring Boot 官方文档中的 [Using the @SpringBootApplication Annotation](https://docs.spring.io/spring-boot/docs/current/reference/html/using-spring-boot.html#using-boot-using-springbootapplication-annotation)。随后,使用 `ApplicationPidFileWriter` 在程序启动过程中,写下一个名为 `spring-jpa-hibernate.pid` 的 PID (process identification number) 文件,可从外部使用此 PID 文件关闭此应用程序。 + +### 数据库持久层 + +数据库持久层,即 `dao` 包内,实现了数据对象的持久化。 + +#### 实体对象 + +`PlayerBean.java` 文件为实体对象,这个对象对应了数据库的一张表。 + +```java +package com.pingcap.dao; + +import jakarta.persistence.*; + +/** + * it's core entity in hibernate + * @Table appoint to table name + */ +@Entity +@Table(name = "player_jpa") +public class PlayerBean { + /** + * @ID primary key + * @GeneratedValue generated way. 
this field will use generator named "player_id" + * @SequenceGenerator using `sequence` feature to create a generator, + * and it named "player_jpa_id_seq" in database, initial form 1 (by `initialValue` + * parameter default), and every operator will increase 1 (by `allocationSize`) + */ + @Id + @GeneratedValue(generator="player_id") + @SequenceGenerator(name="player_id", sequenceName="player_jpa_id_seq", allocationSize=1) + private Long id; + + /** + * @Column field + */ + @Column(name = "coins") + private Integer coins; + @Column(name = "goods") + private Integer goods; + + public Long getId() { + return id; + } + + public void setId(Long id) { + this.id = id; + } + + public Integer getCoins() { + return coins; + } + + public void setCoins(Integer coins) { + this.coins = coins; + } + + public Integer getGoods() { + return goods; + } + + public void setGoods(Integer goods) { + this.goods = goods; + } +} +``` + +这里可以看到,实体类中有很多注解,这些注解给了 Hibernate 额外的信息,用以绑定实体类和表: + +- `@Entity` 声明 `PlayerBean` 是一个实体类。 +- `@Table` 使用注解属性 `name` 将此实体类和表 `player_jpa` 关联。 +- `@Id` 声明此属性关联表的主键列。 +- `@GeneratedValue` 表示自动生成该列的值,而不应手动设置,使用属性 `generator` 指定生成器的名称为 `player_id`。 +- `@SequenceGenerator` 声明一个使用[序列](/sql-statements/sql-statement-create-sequence.md)的生成器,使用注解属性 `name` 声明生成器的名称为 `player_id` (与 `@GeneratedValue` 中指定的名称需保持一致)。随后使用注解属性 `sequenceName` 指定数据库中序列的名称。最后,使用注解属性 `allocationSize` 声明序列的步长为 1。 +- `@Column` 将每个私有属性声明为表 `player_jpa` 的一列,使用注解属性 `name` 确定属性对应的列名。 + +#### 存储库 + +为了抽象数据库层,Spring 应用程序使用 [Repository](https://docs.spring.io/spring-data/jpa/docs/current/reference/html/#repositories) 接口,或者 Repository 的子接口。 这个接口映射到一个数据库对象,常见的,比如会映射到一个表上。JPA 会实现一些预制的方法,比如 [INSERT](/sql-statements/sql-statement-insert.md),或使用主键的 [SELECT](/sql-statements/sql-statement-select.md) 等。 + +```java +package com.pingcap.dao; + +import jakarta.persistence.LockModeType; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.Pageable; +import 
org.springframework.data.jpa.repository.JpaRepository; +import org.springframework.data.jpa.repository.Lock; +import org.springframework.data.jpa.repository.Query; +import org.springframework.data.repository.query.Param; +import org.springframework.stereotype.Repository; + +import java.util.List; + +@Repository +public interface PlayerRepository extends JpaRepository { + /** + * use HQL to query by page + * @param pageable a pageable parameter required by hibernate + * @return player list package by page message + */ + @Query(value = "SELECT player_jpa FROM PlayerBean player_jpa") + Page getPlayersByPage(Pageable pageable); + + /** + * use SQL to query by limit, using named parameter + * @param limit sql parameter + * @return player list (max size by limit) + */ + @Query(value = "SELECT * FROM player_jpa LIMIT :limit", nativeQuery = true) + List getPlayersByLimit(@Param("limit") Integer limit); + + /** + * query player and add a lock for update + * @param id player id + * @return player + */ + @Lock(value = LockModeType.PESSIMISTIC_WRITE) + @Query(value = "SELECT player FROM PlayerBean player WHERE player.id = :id") + // @Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) + PlayerBean getPlayerAndLock(@Param("id") Long id); +} +``` + +`PlayerRepository` 拓展了 Spring 用于 JPA 数据访问所使用的接口 `JpaRepository`。使用 `@Query` 注解,告诉 Hibernate 此接口如何实现查询。在此处使用了两种查询语句的语法,其中,在接口 `getPlayersByPage` 中的查询语句使用的是一种被 Hibernate 称为 [HQL](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#hql) (Hibernate Query Language) 的语法。而接口 `getPlayersByLimit` 中使用的是普通的 SQL,在使用 SQL 语法时,需要将 `@Query` 的注解参数 `nativeQuery` 设置为 true。 + +在 `getPlayersByLimit` 注解的 SQL 中,`:limit` 在 Hibernate 中被称为[命名参数](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#jpql-query-parameters),Hibernate 将按名称自动寻找并拼接注解所在接口内的参数。你也可以使用 `@Param` 来指定与参数不同的名称用于注入。 + +在 `getPlayerAndLock` 中,使用了一个注解 
[@Lock](https://docs.spring.io/spring-data/jpa/docs/current/api/org/springframework/data/jpa/repository/Lock.html),此注解声明此处使用悲观锁进行锁定,如需了解更多其他锁定方式,可查看[实体锁定](https://openjpa.apache.org/builds/2.2.2/apache-openjpa/docs/jpa_overview_em_locking.html)文档。此处的 `@Lock` 仅可与 HQL 搭配使用,否则将会产生错误。当然,如果你希望直接使用 SQL 进行锁定,可直接使用注释部分的注解: + +```java +@Query(value = "SELECT * FROM player_jpa WHERE id = :id FOR UPDATE", nativeQuery = true) +``` + +直接使用 SQL 的 `FOR UPDATE` 来增加锁。你也可通过 TiDB [SELECT 文档](/sql-statements/sql-statement-select.md) 进行更深层次的原理学习。 + +### 逻辑实现 + +逻辑实现层,即 `service` 包,内含了项目实现的接口与逻辑 + +#### 接口 + +`PlayerService.java` 文件内定义了逻辑接口,实现接口,而不是直接编写一个类的原因,是尽量使例子贴近实际使用,体现设计的开闭原则。你也可以省略掉此接口,在依赖类中直接注入实现类,但并不推荐这样做。 + +```java +package com.pingcap.service; + +import com.pingcap.dao.PlayerBean; +import org.springframework.data.domain.Page; + +import java.util.List; + +public interface PlayerService { + /** + * create players by passing in a List of PlayerBean + * + * @param players will create players list + * @return The number of create accounts + */ + Integer createPlayers(List players); + + /** + * buy goods and transfer funds between one player and another in one transaction + * @param sellId sell player id + * @param buyId buy player id + * @param amount goods amount, if sell player has not enough goods, the trade will break + * @param price price should pay, if buy player has not enough coins, the trade will break + */ + void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException; + + /** + * get the player info by id. + * + * @param id player id + * @return the player of this id + */ + PlayerBean getPlayerByID(Long id); + + /** + * get a subset of players from the data store by limit. + * + * @param limit return max size + * @return player list + */ + List getPlayers(Integer limit); + + /** + * get a page of players from the data store. 
+ * + * @param index page index + * @param size page size + * @return player list + */ + Page getPlayersByPage(Integer index, Integer size); + + /** + * count players from the data store. + * + * @return all players count + */ + Long countPlayers(); +} +``` + +#### 实现(重要) + +`PlayerService.java` 文件内实现了 `PlayerService` 接口,所有数据操作逻辑都编写在这里。 + +```java +package com.pingcap.service.impl; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.dao.PlayerRepository; +import com.pingcap.service.PlayerService; +import jakarta.transaction.Transactional; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import org.springframework.data.domain.PageRequest; +import org.springframework.stereotype.Service; + +import java.util.List; + +/** + * PlayerServiceImpl implements PlayerService interface + * @Transactional it means every method in this class, will package by a pair of + * transaction.begin() and transaction.commit(). and it will be call + * transaction.rollback() when method throw an exception + */ +@Service +@Transactional +public class PlayerServiceImpl implements PlayerService { + @Autowired + private PlayerRepository playerRepository; + + @Override + public Integer createPlayers(List players) { + return playerRepository.saveAll(players).size(); + } + + @Override + public void buyGoods(Long sellId, Long buyId, Integer amount, Integer price) throws RuntimeException { + PlayerBean buyPlayer = playerRepository.getPlayerAndLock(buyId); + PlayerBean sellPlayer = playerRepository.getPlayerAndLock(sellId); + if (buyPlayer == null || sellPlayer == null) { + throw new RuntimeException("sell or buy player not exist"); + } + + if (buyPlayer.getCoins() < price || sellPlayer.getGoods() < amount) { + throw new RuntimeException("coins or goods not enough, rollback"); + } + + buyPlayer.setGoods(buyPlayer.getGoods() + amount); + buyPlayer.setCoins(buyPlayer.getCoins() - price); + playerRepository.save(buyPlayer); + + 
sellPlayer.setGoods(sellPlayer.getGoods() - amount); + sellPlayer.setCoins(sellPlayer.getCoins() + price); + playerRepository.save(sellPlayer); + } + + @Override + public PlayerBean getPlayerByID(Long id) { + return playerRepository.findById(id).orElse(null); + } + + @Override + public List getPlayers(Integer limit) { + return playerRepository.getPlayersByLimit(limit); + } + + @Override + public Page getPlayersByPage(Integer index, Integer size) { + return playerRepository.getPlayersByPage(PageRequest.of(index, size)); + } + + @Override + public Long countPlayers() { + return playerRepository.count(); + } +} +``` + +这里使用了 `@Service` 这个注解,声明此对象的生命周期交由 Spring 管理。 + +注意,除了有 `@Service` 注解之外,PlayerServiceImpl 实现类还有一个 [@Transactional](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#transaction-declarative-annotations) 注解。当在应用程序中启用事务管理时 (可使用 [@EnableTransactionManagement](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/transaction/annotation/EnableTransactionManagement.html) 打开,但 Spring Boot 默认开启,无需再次手动配置),Spring 会自动将所有带有 `@Transactional` 注释的对象包装在一个代理中,使用该代理对对象的调用进行处理。 + +你可以简单的认为,代理在带有 `@Transactional` 注释的对象内的函数调用时:在函数顶部将使用 `transaction.begin()` 开启事务,函数返回后,调用 `transaction.commit()` 进行事务提交,而出现任何运行时错误时,代理将会调用 `transaction.rollback()` 来回滚。 + +你可参阅[数据库事务](/develop/dev-guide-transaction-overview.md)来获取更多有关事务的信息,或者阅读 Spring 官网中的文章[理解 Spring 框架的声明式事务实现](https://docs.spring.io/spring-framework/docs/current/reference/html/data-access.html#tx-decl-explained)。 + +整个实现类中,`buyGoods` 函数需重点关注,其在不符合逻辑时将抛出异常,引导 Hibernate 进行事务回滚,防止出现错误数据。 + +### 外部接口 + +`controller` 包对外暴露 HTTP 接口,可以通过 [REST API](https://www.redhat.com/en/topics/api/what-is-a-rest-api#) 来访问服务。 + +```java +package com.pingcap.controller; + +import com.pingcap.dao.PlayerBean; +import com.pingcap.service.PlayerService; +import org.springframework.beans.factory.annotation.Autowired; +import org.springframework.data.domain.Page; +import 
org.springframework.lang.NonNull; +import org.springframework.web.bind.annotation.*; + +import java.util.List; + +@RestController +@RequestMapping("/player") +public class PlayerController { + @Autowired + private PlayerService playerService; + + @PostMapping + public Integer createPlayer(@RequestBody @NonNull List playerList) { + return playerService.createPlayers(playerList); + } + + @GetMapping("/{id}") + public PlayerBean getPlayerByID(@PathVariable Long id) { + return playerService.getPlayerByID(id); + } + + @GetMapping("/limit/{limit_size}") + public List getPlayerByLimit(@PathVariable("limit_size") Integer limit) { + return playerService.getPlayers(limit); + } + + @GetMapping("/page") + public Page getPlayerByPage(@RequestParam Integer index, @RequestParam("size") Integer size) { + return playerService.getPlayersByPage(index, size); + } + + @GetMapping("/count") + public Long getPlayersCount() { + return playerService.countPlayers(); + } + + @PutMapping("/trade") + public Boolean trade(@RequestParam Long sellID, @RequestParam Long buyID, @RequestParam Integer amount, @RequestParam Integer price) { + try { + playerService.buyGoods(sellID, buyID, amount, price); + } catch (RuntimeException e) { + return false; + } + + return true; + } +} +``` + +`PlayerController` 中使用了尽可能多的注解方式来作为示例展示功能,在实际项目中,请尽量保持风格的统一,同时遵循你公司或团体的规则。`PlayerController` 有许多注解,下方将进行逐一解释: + +- [@RestController](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RestController.html) 将 `PlayerController` 声明为一个 [Web Controller](https://en.wikipedia.org/wiki/Model%E2%80%93view%E2%80%93controller),且将返回值序列化为 JSON 输出。 +- [@RequestMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestMapping.html) 映射 URL 端点为 `/player`,即此 `Web Controller` 仅监听 `/player` URL 下的请求。 +- `@Autowired` 用于 Spring 的自动装配,可以看到,此处声明需要一个 `PlayerService` 对象,此对象为接口,并未指定使用哪一个实现类,这是由 Spring 自动装配的,有关此装配规则,可查看 Spirng 官网中的 
[The IoC container](https://docs.spring.io/spring-framework/docs/3.2.x/spring-framework-reference/html/beans.html) 一文。 +- [@PostMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PostMapping.html) 声明此函数将响应 HTTP 中的 [POST](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/POST) 类型请求。 + - `@RequestBody` 声明此处将 HTTP 的整个载荷解析到参数 `playerList` 中。 + - `@NonNull` 声明参数不可为空,否则将校验并返回错误。 +- [@GetMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/GetMapping.html) 声明此函数将响应 HTTP 中的 [GET](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/GET) 类型请求。 + - [@PathVariable](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PathVariable.html) 可以看到注解中有形如 `{id}` 、`{limit_size}` 这样的占位符,这种占位符将被绑定到 `@PathVariable` 注释的变量中,绑定的依据是注解中的注解属性 `name`(变量名可省略,即 `@PathVariable(name="limit_size")` 可写成 `@PathVariable("limit_size")` ),不特殊指定时,与变量名名称相同。 +- [@PutMapping](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/PutMapping.html) 声明此函数将响应 HTTP 中的 [PUT](https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods/PUT) 类型请求。 +- [@RequestParam](https://docs.spring.io/spring-framework/docs/current/javadoc-api/org/springframework/web/bind/annotation/RequestParam.html) 此声明将解析请求中的 URL 参数、表单参数等参数,绑定至注解的变量中。 + +## 创建相同依赖空白程序(可选) + +本程序使用 [Spring Initializr](https://start.spring.io/) 构建。你可以在这个网页上通过点选以下选项并更改少量配置,来快速得到一个与本示例程序相同依赖的空白应用程序,配置项如下: + +**Project** + +- Maven Project + +**Language** + +- Java + +**Spring Boot** + +- 最新稳定版本 + +**Project Metadata** + +- Group: com.pingcap +- Artifact: spring-jpa-hibernate +- Name: spring-jpa-hibernate +- Package name: com.pingcap +- Packaging: Jar +- Java: 17 + +**Dependencies** + +- Spring Web +- Spring Data JPA +- MySQL Driver + +> **注意:** +> +> 尽管 SQL 相对标准化,但每个数据库供应商都使用 ANSI SQL 定义语法的子集和超集。这被称为数据库的方言。 Hibernate 通过其 
org.hibernate.dialect.Dialect 类和每个数据库供应商的各种子类来处理这些方言的变化。 +> +> 在大多数情况下,Hibernate 将能够通过在启动期间通过 JDBC 连接的一些返回值来确定要使用的正确方言。有关 Hibernate 确定要使用的正确方言的能力(以及你影响该解析的能力)的信息,请参阅[方言解析](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#portability-dialectresolver)。 +> +> 如果由于某种原因无法确定正确的方言,或者你想使用自定义方言,则需要设置 hibernate.dialect 配置项。 +> +> _—— 节选自 Hibernate 官方文档: [Database Dialect](https://docs.jboss.org/hibernate/orm/6.0/userguide/html_single/Hibernate_User_Guide.html#database-dialect)_ + +随后,即可获取一个拥有与示例程序相同依赖的空白 **Spring Boot** 应用程序。 \ No newline at end of file diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md new file mode 100644 index 00000000..80e143b7 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-django.md @@ -0,0 +1,783 @@ +--- +title: 使用 Django 构建 TiDB 应用程序 +summary: 给出一个 Django 构建 TiDB 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-django'] +--- + + + +# 使用 Django 构建 TiDB 应用程序 + +本文档将展示如何使用 [Django](https://www.djangoproject.com/) 构建一个 TiDB Web 应用程序。使用 [django-tidb](https://github.com/pingcap/django-tidb) 模块作为数据访问能力的框架。示例应用程序的代码可从 [Github](https://github.com/pingcap-inc/tidb-example-python) 下载。 + +这是一个较为完整的构建 Restful API 的示例应用程序,展示了一个使用 TiDB 作为数据库的通用 Django 后端服务。该示例设计了以下过程,用于还原一个现实场景: + +这是一个关于游戏的例子,每个玩家有两个属性:金币数 `coins` 和货物数 `goods`。且每个玩家都拥有一个字段 `id`,作为玩家的唯一标识。玩家在金币数和货物数充足的情况下,可以自由地交易。 + +你可以以此示例为基础,构建自己的应用程序。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:安装 Python + +请在你的计算机上下载并安装 
**Python**。本文的示例使用 [Django 3.2.16](https://docs.djangoproject.com/zh-hans/3.2/) 版本。根据 [Django 文档](https://docs.djangoproject.com/zh-hans/3.2/faq/install/#what-python-version-can-i-use-with-django),Django 3.2.16 版本支持 Python 3.6、3.7、3.8、3.9 和 3.10 版本,推荐使用 Python 3.10 版本。 + +## 第 3 步:获取应用程序代码 + +> **建议:** +> +> 如果你希望得到一个与本示例相同依赖的空白程序,而无需示例代码,可参考[创建相同依赖空白程序(可选)](#创建相同依赖空白程序可选)一节。 + +请下载或克隆示例代码库 [pingcap-inc/tidb-example-python](https://github.com/pingcap-inc/tidb-example-python),并进入到目录 `django_example` 中。 + +## 第 4 步:运行应用程序 + +接下来运行应用程序代码,将会生成一个 Web 应用程序。你可以使用 `python manage.py migrate` 命令,要求 Django 在数据库 `django` 中创建一个表 `player`。如果你向应用程序的 Restful API 发送请求,这些请求将会在 TiDB 集群上运行[数据库事务](/develop/dev-guide-transaction-overview.md)。 + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +### 第 4 步第 1 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +更改 `example_project/settings.py` 中的 `DATABASES` 参数: + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, +} +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +下面以 macOS 为例,应将参数更改为: + +```python +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': '2aEp24QWEDLqRFs.root', + 'PASSWORD': '123456', + 'HOST': 'xxx.tidbcloud.com', + 'PORT': 4000, + 'OPTIONS': { + 'ssl': { + "ca": "" + }, + }, + }, +} +``` + +### 第 4 步第 2 部分:运行 + +1. 打开终端,进入 `tidb-example-python` 代码示例目录: + + ```bash + cd /tidb-example-python + ``` + +2. 安装项目依赖并进入 `django_example` 目录: + + ```bash + pip install -r requirement.txt + cd django_example + ``` + +3. 
运行数据模型迁移: + + > **注意:** + > + > - 此步骤假定已经存在 `django` 数据库。 + > - 若未创建 `django` 数据库,可通过 `CREATE DATABASE django` 语句进行创建。关于创建数据库语句的详细信息,参考 [`CREATE DATABASE`](/sql-statements/sql-statement-create-database.md#create-database)。 + > - 数据库名称 `NAME` 可在 `example_project/settings.py` 的 `DATABASES` 属性中更改。 + + 这将在你连接的数据库内生成 Django 所需的相应数据表。 + + ```bash + python manage.py migrate + ``` + +4. 运行应用程序: + + ```bash + python manage.py runserver + ``` + +### 第 4 步第 3 部分:输出 + +输出的最后部分应如下所示: + +``` +Watching for file changes with StatReloader +Performing system checks... + +System check identified no issues (0 silenced). +December 12, 2022 - 08:21:50 +Django version 3.2.16, using settings 'example_project.settings' +Starting development server at http://127.0.0.1:8000/ +Quit the server with CONTROL-C. +``` + +如果你想了解有关此应用程序的代码的详细信息,可参阅[实现细节](#实现细节)部分。 + +## 第 5 步:HTTP 请求 + +在运行应用程序后,你可以通过访问根地址 `http://localhost:8000` 向后端程序发送 HTTP 请求。下面将给出一些示例请求来演示如何使用该服务。 + + + +
+ +1. 将配置文件 [`Player.postman_collection.json`](https://raw.githubusercontent.com/pingcap-inc/tidb-example-python/main/django_example/Player.postman_collection.json) 导入 [Postman](https://www.postman.com/)。 + +2. 导入后 **Collections** > **Player** 如图所示: + + ![postman import](/media/develop/postman_player_import.png) + +3. 发送请求: + + - 增加玩家 + + 点击 **Create** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/` 请求。返回值为增加的玩家个数,预期为 1。 + + - 使用 ID 获取玩家信息 + + 点击 **GetByID** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/1` 请求。返回值为 ID 为 1 的玩家信息。 + + - 使用 Limit 批量获取玩家信息 + + 点击 **GetByLimit** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/limit/3` 请求。返回值为最多 3 个玩家的信息列表。 + + - 获取玩家个数 + + 点击 **Count** 标签,点击 **Send** 按钮,发送 `GET` 形式的 `http://localhost:8000/player/count` 请求。返回值为玩家个数。 + + - 玩家交易 + + 点击 **Trade** 标签,点击 **Send** 按钮,发送 `POST` 形式的 `http://localhost:8000/player/trade` 请求。请求参数为售卖玩家 ID `sellID`、购买玩家 ID `buyID`、购买货物数量 `amount` 以及购买消耗金币数 `price`。返回值为交易是否成功。当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +下面使用 curl 请求服务端。 + +- 增加玩家 + + 使用 `POST` 方法向 `/player` 端点发送请求来增加玩家,例如: + + ```shell + curl --location --request POST 'http://localhost:8000/player/' --header 'Content-Type: application/json' --data-raw '[{"coins":100,"goods":20}]' + ``` + + 这里使用 JSON 作为信息的载荷。表示需要创建一个金币数 `coins` 为 100,货物数 `goods` 为 20 的玩家。返回值为创建的玩家信息: + + ``` + create 1 players. + ``` + +- 使用 ID 获取玩家信息 + + 使用 `GET` 方法向 `/player` 端点发送请求来获取玩家信息。此外,还需要在路径上给出玩家的 ID 参数,即 `/player/{id}`。例如,在请求 ID 为 1 的玩家时: + + ```shell + curl --location --request GET 'http://localhost:8000/player/1' + ``` + + 返回值为 ID 为 1 的玩家的信息: + + ```json + { + "coins": 200, + "goods": 10, + "id": 1 + } + ``` + +- 使用 Limit 批量获取玩家信息 + + 使用 `GET` 方法向 `/player/limit` 端点发送请求来获取玩家信息。此外,还需要在路径上给出限制查询的玩家信息的总数,即 `/player/limit/{limit}`。例如,在请求最多 3 个玩家的信息时: + + ```shell + curl --location --request GET 'http://localhost:8000/player/limit/3' + ``` + + 返回值为玩家信息的列表: + + ```json + [ + { + "coins": 200, + "goods": 10, + "id": 1 + }, + { + "coins": 0, + "goods": 30, + "id": 2 + }, + { + "coins": 100, + "goods": 20, + "id": 3 + } + ] + ``` + +- 获取玩家个数 + + 使用 `GET` 方法向 `/player/count` 端点发送请求来获取玩家个数: + + ```shell + curl --location --request GET 'http://localhost:8000/player/count' + ``` + + 返回值为玩家个数: + + ``` + 4 + ``` + +- 玩家交易 + + 使用 `POST` 方法向 `/player/trade` 端点发送请求来发起玩家间的交易,例如: + + ```shell + curl --location --request POST 'http://localhost:8000/player/trade' \ + --header 'Content-Type: application/x-www-form-urlencoded' \ + --data-urlencode 'sellID=1' \ + --data-urlencode 'buyID=2' \ + --data-urlencode 'amount=10' \ + --data-urlencode 'price=100' + ``` + + 这里使用 Form Data 作为信息的载荷。表示售卖玩家 ID `sellID` 为 1、购买玩家 ID `buyID` 为 2、购买货物数量 `amount` 为 10、购买消耗金币数 `price` 为 100。 + + 返回值为交易是否成功: + + ``` + true + ``` + + 当出现售卖玩家货物不足、购买玩家金币不足或数据库错误时,交易将不成功。并且由于[数据库事务](/develop/dev-guide-transaction-overview.md)保证,不会有玩家的金币或货物丢失的情况。 + +
+ +
+ +为方便测试,你可以使用 [`request.sh`](https://github.com/pingcap-inc/tidb-example-python/blob/main/django_example/request.sh) 脚本依次发送以下请求: + +1. 循环创建 10 名玩家 +2. 获取 ID 为 1 的玩家信息 +3. 获取至多 3 名玩家信息列表 +4. 获取玩家总数 +5. ID 为 1 的玩家作为售出方,ID 为 2 的玩家作为购买方,购买 10 个货物,耗费 100 金币 + +使用 `./request.sh` 命令运行此脚本,运行结果如下所示: + +```shell +> ./request.sh +loop to create 10 players: +create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players.create 1 players. + +get player 1: +{"id": 1, "coins": 100, "goods": 20} + +get players by limit 3: +[{"id": 1, "coins": 100, "goods": 20}, {"id": 2, "coins": 100, "goods": 20}, {"id": 3, "coins": 100, "goods": 20}] + +get players count: +10 + +trade by two players: +trade successful +``` + +
+ +
+ +## 实现细节 + +本小节介绍示例应用程序项目中的组件。 + +### 总览 + +本示例项目的目录树大致如下所示: + +``` +. +├── example_project +│ ├── __init__.py +│ ├── asgi.py +│ ├── settings.py +│ ├── urls.py +│ └── wsgi.py +├── player +│ ├── __init__.py +│ ├── admin.py +│ ├── apps.py +│ ├── migrations +│ │ ├── 0001_initial.py +│ │ └── __init__.py +│ ├── models.py +│ ├── tests.py +│ ├── urls.py +│ └── views.py +└── manage.py +``` + +其中: + +- 每一个文件夹中的 `__init__.py` 文件声明了该文件夹是一个 Python 包。 +- `manage.py` 为 Django 自动生成的用于管理项目的脚本。 +- `example_project` 包含项目级别的代码: + + - `settings.py` 声明了项目的配置,如数据库地址、密码、使用的数据库方言等。 + - `urls.py` 配置了项目的根路由。 + +- `player` 是项目中提供对 `Player` 数据模型管理、数据查询的包,这在 Django 中被称为应用。你可以使用 `python manage.py startapp player` 来创建一个空白的 `player` 应用。 + + - `models.py` 定义了 `Player` 数据模型。 + - `migrations` 是一组数据模型迁移脚本。你可以使用 `python manage.py makemigrations player` 命令自动分析 `models.py` 文件中定义的数据对象,并生成迁移脚本。 + - `urls.py` 定义了应用的路由。 + - `views.py` 提供了应用的逻辑代码。 + +> **注意:** +> +> 由于 Django 的设计采用了可插拔模式,因此,你需要在创建应用后,在项目中进行注册。在本示例中,注册过程就是在 `example_project/settings.py` 文件中,在 `INSTALLED_APPS` 对象内添加 `'player.apps.PlayerConfig'` 条目。你可以参考示例代码 [`settings.py`](https://github.com/pingcap-inc/tidb-example-python/blob/main/django_example/example_project/settings.py#L33-L41) 以获得更多信息。 + +### 项目配置 + +本节将简要介绍 `example_project` 包内 `settings.py` 的重要配置。这个文件包含了 Django 项目的配置,声明了项目包含的应用、中间件、连接的数据库等信息。你可以通过[创建相同依赖空白程序](#创建相同依赖空白程序可选)这一节来了解此配置文件的生成流程,也可直接在项目中使用 `settings.py` 文件。关于 Django 配置的更多信息,参考 [Django 配置](https://docs.djangoproject.com/zh-hans/3.2/topics/settings/)文档。 + +```python +... 
+ +# Application definition + +INSTALLED_APPS = [ + 'player.apps.PlayerConfig', + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + 'django.contrib.messages', + 'django.contrib.staticfiles', +] + +MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + # 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', +] + +... + +# Database +# https://docs.djangoproject.com/en/3.2/ref/settings/#databases + +DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, +} +DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + +... +``` + +其中: + +- `INSTALLED_APPS`:启用的应用全限定名称列表。 +- `MIDDLEWARE`:启用的中间件列表。由于本示例无需 `CsrfViewMiddleware` 中间件,因此其被注释。 +- `DATABASES`:数据库配置。其中,`ENGINE` 一项被配置为 `django_tidb`,这遵循了 [django-tidb](https://github.com/pingcap/django-tidb) 的配置要求。 + +### 根路由 + +在 `example_project` 包中的 `urls.py` 文件中编写了根路由: + +```python +from django.contrib import admin +from django.urls import include, path + +urlpatterns = [ + path('player/', include('player.urls')), + path('admin/', admin.site.urls), +] +``` + +在上面的示例中,根路由将 `player/` 路径指向 `player.urls`。即,`player` 包下的 `urls.py` 将负责处理所有以 `player/` 开头的 URL 请求。关于更多 Django URL 调度器的信息,请参考 [Django URL 调度器](https://docs.djangoproject.com/zh-hans/3.2/topics/http/urls/)文档。 + +### player 应用 + +`player` 应用实现了对 `Player` 对象的数据模型迁移、对象持久化、接口实现等功能。 + +#### 数据模型 + +`models.py` 文件内包含 `Player` 数据模型,这个模型对应了数据库的一张表。 + +```python +from django.db import models + +# Create your models here. 
+ + +class Player(models.Model): + id = models.AutoField(primary_key=True) + coins = models.IntegerField() + goods = models.IntegerField() + + objects = models.Manager() + + class Meta: + db_table = "player" + + def as_dict(self): + return { + "id": self.id, + "coins": self.coins, + "goods": self.goods, + } +``` + +在上面的示例中,数据模型中有一个子类 `Meta`,这些子类给了 Django 额外的信息,用以指定数据模型的元信息。其中,`db_table` 声明此数据模型对应的表名为 `player`。关于模型元信息的全部选项可查看 [Django 模型 Meta 选项](https://docs.djangoproject.com/zh-hans/3.2/ref/models/options/)文档。 + +此外,数据模型中定义了 `id`、`coins` 及 `goods` 三个属性: + +- `id`:`models.AutoField(primary_key=True)` 表示其为一个自动递增的主键。 +- `coins`:`models.IntegerField()` 表示其为一个 Integer 类型的字段。 +- `goods`:`models.IntegerField()` 表示其为一个 Integer 类型的字段。 + +关于数据模型的详细信息,可查看 [Django 模型](https://docs.djangoproject.com/zh-hans/3.2/topics/db/models/)文档。 + +#### 数据模型迁移 + +Django 以 Python 数据模型定义代码为依赖,对数据库模型进行迁移。因此,它会生成一系列数据库模型迁移脚本,用于解决代码与数据库之间的差异。在 `models.py` 中定义完 `Player` 数据模型后,你可以使用 `python manage.py makemigrations player` 生成迁移脚本。在本文示例中,`migrations` 包内的 `0001_initial.py` 就是自动生成的迁移脚本。 + +```python +# Generated by Django 3.2.16 on 2022-11-16 11:09 + +from django.db import migrations, models + + +class Migration(migrations.Migration): + + initial = True + + dependencies = [ + ] + + operations = [ + migrations.CreateModel( + name='Player', + fields=[ + ('id', models.AutoField(primary_key=True, serialize=False)), + ('coins', models.IntegerField()), + ('goods', models.IntegerField()), + ], + options={ + 'db_table': 'player', + }, + ), + ] +``` + +你可以使用 `python manage.py sqlmigrate ...` 来预览迁移脚本最终将运行的 SQL 语句。这将极大地减少迁移脚本运行你意料之外的 SQL 语句的可能性。在生成迁移脚本后,推荐至少使用一次此命令预览并仔细检查生成的 SQL 语句。在本示例中,你可以运行 `python manage.py sqlmigrate player 0001`,其输出为可读的 SQL 语句,有助于开发者对语句进行审核: + +```sql +-- +-- Create model Player +-- +CREATE TABLE `player` (`id` integer AUTO_INCREMENT NOT NULL PRIMARY KEY, `coins` integer NOT NULL, `goods` integer NOT NULL); +``` + +生成迁移脚本后,你可以使用 `python manage.py migrate` 
实施数据迁移。此命令拥有幂等性,其运行后将在数据库内保存一条运行记录以完成幂等保证。因此,你可以多次运行此命令,而无需担心重复运行 SQL 语句。 + +#### 应用路由 + +在[根路由](#根路由)一节中,示例程序将 `player/` 路径指向了 `player.urls`。本节将展开叙述 `player` 包下的 `urls.py` 应用路由: + +```python +from django.urls import path + +from . import views + +urlpatterns = [ + path('', views.create, name='create'), + path('count', views.count, name='count'), + path('limit/', views.limit_list, name='limit_list'), + path('', views.get_by_id, name='get_by_id'), + path('trade', views.trade, name='trade'), +] +``` + +应用路由注册了 5 个路径: + +- `''`:被指向了 `views.create` 函数。 +- `'count'`:被指向了 `views.count` 函数。 +- `'limit/'`:被指向了 `views.limit_list` 函数。此处路径包含一个 `` 路径变量,其中: + + - `int` 是指这个参数其将被验证是否为 `int` 类型。 + - `limit` 是指此参数的值将被映射至名为 `limit` 的函数入参中。 + +- `''`:被指向了 `views.get_by_id` 函数,此处路径包含一个 `` 路径变量。 +- `'trade'`:被指向了 `views.trade` 函数。 + +此外,应用路由是根路由转发而来的,因此将在 URL 匹配时包含根路由配置的路径。如上面示例所示,根路由配置为 `player/` 转发至此应用路由,那么,应用路由中的: + +- `''` 在实际的请求中为 `http(s)://(:)/player`。 +- `'count'` 在实际的请求中为 `http(s)://(:)/player/count`。 +- `'limit/'` 以 `limit` 为 `3` 为例,在实际的请求中为 `http(s)://(:)/player/limit/3`。 + +#### 逻辑实现 + +逻辑实现代码,在 `player` 包下的 `views.py` 内,这在 Django 中被称为视图。关于 Django 视图的更多信息,参考 [Django 视图](https://docs.djangoproject.com/zh-hans/3.2/topics/http/views/)文档。 + +```python +from django.db import transaction +from django.db.models import F +from django.shortcuts import get_object_or_404 + +from django.http import HttpResponse, JsonResponse +from django.views.decorators.http import * +from .models import Player +import json + + +@require_POST +def create(request): + dict_players = json.loads(request.body.decode('utf-8')) + players = list(map( + lambda p: Player( + coins=p['coins'], + goods=p['goods'] + ), dict_players)) + result = Player.objects.bulk_create(objs=players) + return HttpResponse(f'create {len(result)} players.') + + +@require_GET +def count(request): + return HttpResponse(Player.objects.count()) + + +@require_GET +def limit_list(request, limit: int = 0): + if limit == 0: + return 
HttpResponse("") + players = set(Player.objects.all()[:limit]) + dict_players = list(map(lambda p: p.as_dict(), players)) + return JsonResponse(dict_players, safe=False) + + +@require_GET +def get_by_id(request, player_id: int): + result = get_object_or_404(Player, pk=player_id).as_dict() + return JsonResponse(result) + + +@require_POST +@transaction.atomic +def trade(request): + sell_id, buy_id, amount, price = int(request.POST['sellID']), int(request.POST['buyID']), \ + int(request.POST['amount']), int(request.POST['price']) + sell_player = Player.objects.select_for_update().get(id=sell_id) + if sell_player.goods < amount: + raise Exception(f'sell player {sell_player.id} goods not enough') + + buy_player = Player.objects.select_for_update().get(id=buy_id) + if buy_player.coins < price: + raise Exception(f'buy player {buy_player.id} coins not enough') + + Player.objects.filter(id=sell_id).update(goods=F('goods') - amount, coins=F('coins') + price) + Player.objects.filter(id=buy_id).update(goods=F('goods') + amount, coins=F('coins') - price) + + return HttpResponse("trade successful") +``` + +下面将逐一解释代码中的重点部分: + +- 装饰器: + + - `@require_GET`:代表此函数仅接受 `GET` 类型的 HTTP 请求。 + - `@require_POST`:代表此函数仅接受 `POST` 类型的 HTTP 请求。 + - `@transaction.atomic`:代表此函数内的所有数据库操作将被包含于同一个事务中运行。关于在 Django 中使用事务的更多信息,可参考 [Django 数据库事务](https://docs.djangoproject.com/zh-hans/3.2/topics/db/transactions/)文档。关于 TiDB 中事物的详细信息,可参考 [TiDB 事务概览](/develop/dev-guide-transaction-overview.md)。 + +- `create` 函数: + + - 获取 `request` 对象中 `body` 的 Payload,并用 `utf-8` 解码: + + ```python + dict_players = json.loads(request.body.decode('utf-8')) + ``` + + - 使用 lambda 中的 `map` 函数,将 dict 类型的 `dict_players` 对象转换为 `Player` 数据模型的列表: + + ```python + players = list(map( + lambda p: Player( + coins=p['coins'], + goods=p['goods'] + ), dict_players)) + ``` + + - 调用 `Player` 数据模型的 `bulk_create` 函数,批量添加 `players` 列表,并返回添加的数据条目: + + ```python + result = Player.objects.bulk_create(objs=players) + return HttpResponse(f'create 
{len(result)} players.') + ``` + +- `count` 函数:调用 `Player` 数据模型的 `count` 函数,并返回所有的数据条目。 +- `limit_list` 函数: + + - 短路逻辑,`limit` 为 `0` 时不发送数据库请求: + + ```python + if limit == 0: + return HttpResponse("") + ``` + + - 调用 `Player` 数据模型的 `all` 函数,并使用切片操作符获取前 `limit` 个数据。需要注意的是,Django 不是获取所有数据并在内存中切分前 `limit` 个数据,而是在使用时请求数据库的前 `limit` 个数据。这是由于 Django 重写了切片操作符,并且 QuerySet 对象是**惰性**的。这意味着对一个未执行的 QuerySet 进行切片,将继续返回一个未执行的 QuerySet,直到你第一次真正的请求 QuerySet 内的数据。例如此处使用 `set` 函数对其进行迭代并返回整个集合。关于 Django QuerySet 的更多信息,你可以参考 [Django QuerySet API](https://docs.djangoproject.com/zh-hans/3.2/ref/models/querysets/) 文档。 + + ```python + players = set(Player.objects.all()[:limit]) + ``` + + - 将返回的 `Player` 数据模型的列表,转为对象为 dict 的列表,并使用 `JsonResponse` 输出。 + + ```python + dict_players = list(map(lambda p: p.as_dict(), players)) + return JsonResponse(dict_players, safe=False) + ``` + +- `get_by_id` 函数: + + - 使用 `get_object_or_404` 语法糖传入 `player_id`,并将 `Player` 对象转为 dict。如数据不存在,将由此函数返回 `404` 状态码: + + ```python + result = get_object_or_404(Player, pk=player_id).as_dict() + ``` + + - 使用 `JsonResponse` 返回数据: + + ```python + return JsonResponse(result) + ``` + +- `trade` 函数: + + - 从 `POST` Payload 中接收 Form 形式的数据: + + ```python + sell_id, buy_id, amount, price = int(request.POST['sellID']), int(request.POST['buyID']), \ + int(request.POST['amount']), int(request.POST['price']) + ``` + + - 调用 `Player` 数据模型的 `select_for_update` 函数对卖家和买家的数据进行加锁,并检查卖家的货物数量和买家的货币数量是否足够。该函数使用了 `@transaction.atomic` 装饰器,任意异常都会导致事务回滚。可以利用这个机制,在任意检查失败的时候,抛出异常,由 Django 进行事务回滚。 + + ```python + sell_player = Player.objects.select_for_update().get(id=sell_id) + if sell_player.goods < amount: + raise Exception(f'sell player {sell_player.id} goods not enough') + + buy_player = Player.objects.select_for_update().get(id=buy_id) + if buy_player.coins < price: + raise Exception(f'buy player {buy_player.id} coins not enough') + ``` + + - 更新卖家与买家的数据。由于这里使用了 `@transaction.atomic` 装饰器,任何异常都将由 Django 回滚事务。因此,请不要在此处使用 `try-except` 
语句进行异常处理。如果一定需要处理,请在 except 块中将异常继续抛向上层,以防止因 Django 误认为函数运行正常而提交事务,导致数据错误。 + + ```python + Player.objects.filter(id=sell_id).update(goods=F('goods') - amount, coins=F('coins') + price) + Player.objects.filter(id=buy_id).update(goods=F('goods') + amount, coins=F('coins') - price) + ``` + + - 返回交易成功字符串,因为其他情况将导致异常抛出返回: + + ```python + return HttpResponse("trade successful") + ``` + +## 创建相同依赖空白程序(可选) + +本程序使用 Django Admin CLI [django-admin](https://django-admin-cli.readthedocs.io/en/stable/index.html) 构建。你可以安装并使用 `django-admin` 来快速完成 Django 项目的初始化。如果需要快速获得与示例程序 `django_example` 相同的可运行空白应用程序,可以按照以下步骤操作: + +1. 初始化 Django 项目 `copy_django_example`: + + ```bash + pip install -r requirement.txt + django-admin startproject copy_django_example + cd copy_django_example + ``` + +2. 更改 `DATABASES` 配置: + + 1. 打开 `copy_django_example/settings.py` 配置文件 + 2. 将 `DATABASES` 部分从指向本地 SQLite 的配置更改为 TiDB 集群的信息: + + ```python + DATABASES = { + 'default': { + 'ENGINE': 'django_tidb', + 'NAME': 'django', + 'USER': 'root', + 'PASSWORD': '', + 'HOST': '127.0.0.1', + 'PORT': 4000, + }, + } + DEFAULT_AUTO_FIELD = 'django.db.models.AutoField' + ``` + + 3. 
由于本示例不需要跨域校验,因此你需要注释或删除 `MIDDLEWARE` 中的 `CsrfViewMiddleware`。修改后的 `MIDDLEWARE` 为: + + ```python + MIDDLEWARE = [ + 'django.middleware.security.SecurityMiddleware', + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + # 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + ] + ``` + +至此,你已经完成了一个空白的应用程序,此应用程序与示例应用程序的依赖完全相同。如果需要进一步了解 Django 的使用方法,参考: + +- [Django 文档](https://docs.djangoproject.com/zh-hans/3.2/) +- [Django 入门教程](https://docs.djangoproject.com/zh-hans/3.2/intro/tutorial01/) \ No newline at end of file diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysql-connector.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysql-connector.md new file mode 100644 index 00000000..2e278371 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysql-connector.md @@ -0,0 +1,283 @@ +--- +title: TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序示例。 +aliases: ['/zh/tidb/dev/dev-guide-sample-application-python'] +--- + + + + +# TiDB 和 MySQL Connector/Python 的简单 CRUD 应用程序 + +[MySQL Connector/Python](https://dev.mysql.com/doc/connector-python/en/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 MySQL Connector/Python 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 
2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 MySQL Connector/Python **8.0.31** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +from mysql.connector import connect, MySQLConnection +from mysql.connector.cursor import MySQLCursor + + +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection + + +def create_player(cursor: MySQLCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: MySQLCursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: MySQLCursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((str(uuid.uuid4()), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: MySQLCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: MySQLCursor) -> int: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + 
return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: MySQLCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: MySQLConnection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # all players have random uuid + print(f'start to insert one by one, it will take a long time') + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + print(f'inserted {idx} players') + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以 tuple 进行表示。 + +关于 MySQL Connector/Python 的更多使用方法,你可以参考 [MySQL Connector/Python 官方文档](https://dev.mysql.com/doc/connector-python/en/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +更改 `mysql_connector_python_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test') + connection.autocommit = autocommit + return connection +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = True) -> MySQLConnection: + connection = connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_ca='', + ssl_verify_identity=True + ) + connection.autocommit = autocommit + return connection +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 mysql_connector_python_example.py +``` + +## 第 4 步:预期输出 + +[MySQL Connector/Python 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysql-connector-python) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md new file mode 100644 index 00000000..6665889c --- /dev/null +++ 
b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-mysqlclient.md @@ -0,0 +1,282 @@ +--- +title: TiDB 和 mysqlclient 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 mysqlclient 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 mysqlclient 的简单 CRUD 应用程序 + +[mysqlclient](https://pypi.org/project/mysqlclient/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 mysqlclient 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 mysqlclient **2.1.1** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +import MySQLdb +from MySQLdb import Connection +from MySQLdb.cursors import Cursor + +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) + + +def create_player(cursor: Cursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: Cursor, player_id: str) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: Cursor, limit: int) -> List[tuple]: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + 
players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: Cursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: Cursor) -> None: + cursor.execute("SELECT count(*) FROM player") + return cursor.fetchone()[0] + + +def trade_check(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + _, sell_goods = cursor.fetchone() + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buy_coins, _ = cursor.fetchone() + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: Cursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as conn: + with conn.cursor() as cur: + # create a player, who has a coin and a 
goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(f'id:{test_player[0]}, coins:{test_player[1]}, goods:{test_player[2]}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(f'id:{player[0]}, coins:{player[1]}, goods:{player[2]}') + + +def trade_example() -> None: + with get_connection(autocommit=False) as conn: + with conn.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + conn.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(conn, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(conn, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with conn.cursor() as cur: + _, player1_coin, player1_goods = get_player(cur, "1") + print(f'id:1, coins:{player1_coin}, goods:{player1_goods}') + _, player2_coin, player2_goods = get_player(cur, "2") + print(f'id:2, coins:{player2_coin}, goods:{player2_goods}') + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以元组 (tuple) 进行表示。 + +关于 mysqlclient 的更多使用方法,你可以参考 [mysqlclient 官方文档](https://mysqlclient.readthedocs.io/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `mysqlclient_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="127.0.0.1", + port=4000, + user="root", + password="", + database="test", + autocommit=autocommit + ) +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = True) -> MySQLdb.Connection: + return MySQLdb.connect( + host="xxx.tidbcloud.com", + port=4000, + user="2aEp24QWEDLqRFs.root", + password="123456", + database="test", + autocommit=autocommit, + ssl_mode="VERIFY_IDENTITY", + ssl={ + "ca": "" + } + ) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 mysqlclient_example.py +``` + +## 第 4 步:预期输出 + +[mysqlclient 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#mysqlclient) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md new file mode 100644 index 00000000..b3bd4519 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-peewee.md @@ -0,0 +1,245 @@ +--- +title: TiDB 和 peewee 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 peewee 的简单 
CRUD 应用程序示例。 +--- + + + + +# TiDB 和 peewee 的简单 CRUD 应用程序 + +[peewee](http://docs.peewee-orm.com/en/latest/) 为当前比较流行的开源 Python ORM 之一。 + +本文档将展示如何使用 TiDB 和 peewee 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 peewee **3.15.4** 版本进行说明。 + +```python +import os +import uuid +from typing import List + +from peewee import * + +from playhouse.db_url import connect + +db = connect('mysql://root:@127.0.0.1:4000/test') + + +class Player(Model): + id = CharField(max_length=36, primary_key=True) + coins = IntegerField() + goods = IntegerField() + + class Meta: + database = db + table_name = "player" + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + # create a player, who has a coin and a goods. + Player.create(id="test", coins=1, goods=1) + + # get this player, and print it. + test_player = Player.select().where(Player.id == "test").get() + print(f'id:{test_player.id}, coins:{test_player.coins}, goods:{test_player.goods}') + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. + # each player has a random UUID + player_list = random_player(1919) + Player.bulk_create(player_list, 114) + + # print the number of players + count = Player.select().count() + print(f'number of players: {count}') + + # print 3 players. 
+ three_players = Player.select().limit(3) + for player in three_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +def trade_check(sell_id: str, buy_id: str, amount: int, price: int) -> bool: + sell_goods = Player.select(Player.goods).where(Player.id == sell_id).get().goods + if sell_goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + buy_coins = Player.select(Player.coins).where(Player.id == buy_id).get().coins + if buy_coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + return True + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with db.atomic() as txn: + try: + if trade_check(sell_id, buy_id, amount, price) is False: + txn.rollback() + return + + # deduct the goods of seller, and raise his/her the coins + Player.update(goods=Player.goods - amount, coins=Player.coins + price).where(Player.id == sell_id).execute() + # deduct the coins of buyer, and raise his/her the goods + Player.update(goods=Player.goods + amount, coins=Player.coins - price).where(Player.id == buy_id).execute() + + except Exception as err: + txn.rollback() + print(f'something went wrong: {err}') + else: + txn.commit() + print("trade success") + + +def trade_example() -> None: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + Player.create(id="1", coins=100, goods=0) + Player.create(id="2", coins=114514, goods=20) + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. 
+ # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + after_trade_players = Player.select().where(Player.id.in_(["1", "2"])) + for player in after_trade_players: + print(f'id:{player.id}, coins:{player.coins}, goods:{player.goods}') + + +db.connect() + +# recreate the player table +db.drop_tables([Player]) +db.create_tables([Player]) + +simple_example() +trade_example() +``` + +相较于直接使用 Driver,peewee 屏蔽了创建数据库连接时,不同数据库差异的细节。peewee 还封装了大量的操作,如会话管理、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 类为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。peewee 使用 `Player` 类为了给 peewee 提供更多的信息,使用了形如以上示例中的 `id = CharField(max_length=36, primary_key=True)` 的类型定义,用来指示字段类型和其附加属性。`id = CharField(max_length=36, primary_key=True)` 表示 `id` 字段为 `CharField` 类型,对应数据库类型为 `VARCHAR`,长度为 `36`,且为主键。 + +关于 peewee 的更多使用方法,你可以参考 [peewee 官网](http://docs.peewee-orm.com/en/latest/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `peewee_example.py` 内 `connect` 函数的入参: + +```python +db = connect('mysql://root:@127.0.0.1:4000/test') +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `connect` 更改为: + +- peewee 将 PyMySQL 作为 Driver 时: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_verify_cert=True, ssl_ca="") + ``` + +- peewee 将 mysqlclient 作为 Driver 时: + + ```python + db = connect('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', + ssl_mode="VERIFY_IDENTITY", ssl={"ca": ""}) + ``` + +由于 peewee 会将参数透传至 Driver 中,使用 peewee 时请注意 Driver 的使用类型。 + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 peewee_example.py +``` + +## 第 4 步:预期输出 + +[peewee 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#peewee) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md new file mode 100644 index 00000000..5657c625 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-pymysql.md @@ -0,0 +1,277 @@ +--- +title: TiDB 和 PyMySQL 的简单 CRUD 应用程序 +summary: 给出一个 TiDB 和 PyMySQL 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 PyMySQL 的简单 CRUD 应用程序 + 
+[PyMySQL](https://pypi.org/project/PyMySQL/) 为当前比较流行的开源 Python Driver 之一。 + +本文档将展示如何使用 TiDB 和 PyMySQL 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 PyMySQL **1.0.2** 版本进行说明。虽然 Python 的 Driver 相较其他语言,使用也极其方便。但因其不可屏蔽底层实现,需手动管控事务的特性,如果没有大量必须使用 SQL 的场景,仍然推荐使用 ORM 进行程序编写。这可以降低程序的耦合性。 + +```python +import uuid +from typing import List + +import pymysql.cursors +from pymysql import Connection +from pymysql.cursors import DictCursor + + +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) + + +def create_player(cursor: DictCursor, player: tuple) -> None: + cursor.execute("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", player) + + +def get_player(cursor: DictCursor, player_id: str) -> dict: + cursor.execute("SELECT id, coins, goods FROM player WHERE id = %s", (player_id,)) + return cursor.fetchone() + + +def get_players_with_limit(cursor: DictCursor, limit: int) -> tuple: + cursor.execute("SELECT id, coins, goods FROM player LIMIT %s", (limit,)) + return cursor.fetchall() + + +def random_player(amount: int) -> List[tuple]: + players = [] + for _ in range(amount): + players.append((uuid.uuid4(), 10000, 10000)) + + return players + + +def bulk_create_player(cursor: DictCursor, players: List[tuple]) -> None: + cursor.executemany("INSERT INTO player (id, coins, goods) VALUES (%s, %s, %s)", players) + + +def get_count(cursor: DictCursor) -> int: + 
cursor.execute("SELECT count(*) as count FROM player") + return cursor.fetchone()['count'] + + +def trade_check(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + get_player_with_lock_sql = "SELECT coins, goods FROM player WHERE id = %s FOR UPDATE" + + # sell player goods check + cursor.execute(get_player_with_lock_sql, (sell_id,)) + seller = cursor.fetchone() + if seller['goods'] < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + cursor.execute(get_player_with_lock_sql, (buy_id,)) + buyer = cursor.fetchone() + if buyer['coins'] < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade_update(cursor: DictCursor, sell_id: str, buy_id: str, amount: int, price: int) -> None: + update_player_sql = "UPDATE player set goods = goods + %s, coins = coins + %s WHERE id = %s" + + # deduct the goods of seller, and raise his/her the coins + cursor.execute(update_player_sql, (-amount, price, sell_id)) + # deduct the coins of buyer, and raise his/her the goods + cursor.execute(update_player_sql, (amount, -price, buy_id)) + + +def trade(connection: Connection, sell_id: str, buy_id: str, amount: int, price: int) -> None: + with connection.cursor() as cursor: + if trade_check(cursor, sell_id, buy_id, amount, price) is False: + connection.rollback() + return + + try: + trade_update(cursor, sell_id, buy_id, amount, price) + except Exception as err: + connection.rollback() + print(f'something went wrong: {err}') + else: + connection.commit() + print("trade success") + + +def simple_example() -> None: + with get_connection(autocommit=True) as connection: + with connection.cursor() as cur: + # create a player, who has a coin and a goods. + create_player(cur, ("test", 1, 1)) + + # get this player, and print it. + test_player = get_player(cur, "test") + print(test_player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + bulk_create_player(cur, player_list[idx:idx + 114]) + + # print the number of players + count = get_count(cur) + print(f'number of players: {count}') + + # print 3 players. + three_players = get_players_with_limit(cur, 3) + for player in three_players: + print(player) + + +def trade_example() -> None: + with get_connection(autocommit=False) as connection: + with connection.cursor() as cur: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + create_player(cur, ("1", 100, 0)) + create_player(cur, ("2", 114514, 20)) + connection.commit() + + # player 1 wants to buy 10 goods from player 2. + # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(connection, sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(connection, sell_id="2", buy_id="1", amount=2, price=100) + + # let's take a look for player 1 and player 2 currently + with connection.cursor() as cur: + print(get_player(cur, "1")) + print(get_player(cur, "2")) + + +simple_example() +trade_example() +``` + +Driver 有着更低的封装程度,因此我们可以在程序内见到大量的 SQL。程序内查询到的 `Player`,与 ORM 不同,因为没有数据对象的存在,`Player` 将以 dict 进行表示。 + +关于 PyMySQL 的更多使用方法,你可以参考 [PyMySQL 官方文档](https://pymysql.readthedocs.io/en/latest/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
 + +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `<ca_path>` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `pymysql_example.py` 内 `get_connection` 函数: + +```python +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='127.0.0.1', + port=4000, + user='root', + password='', + database='test', + cursorclass=DictCursor, + autocommit=autocommit) +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `get_connection` 更改为: + +```python +def get_connection(autocommit: bool = False) -> Connection: + return pymysql.connect(host='xxx.tidbcloud.com', + port=4000, + user='2aEp24QWEDLqRFs.root', + password='123456', + database='test', + cursorclass=DictCursor, + autocommit=autocommit, + ssl_ca='<ca_path>', + ssl_verify_cert=True, + ssl_verify_identity=True) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 pymysql_example.py +``` + +## 第 4 步:预期输出 + +[PyMySQL 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#PyMySQL) diff --git a/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md new file mode 100644 index 00000000..0f32bc59 --- /dev/null +++ b/test/sync_pr_docs_cn/data/markdown-pages/zh/tidb/master/develop/dev-guide-sample-application-python-sqlalchemy.md @@ -0,0 +1,238 @@ +--- +title: TiDB 和 SQLAlchemy 的简单 CRUD
应用程序 +summary: 给出一个 TiDB 和 SQLAlchemy 的简单 CRUD 应用程序示例。 +--- + + + + +# TiDB 和 SQLAlchemy 的简单 CRUD 应用程序 + +[SQLAlchemy](https://www.sqlalchemy.org/) 为当前比较流行的开源 Python ORM 之一。 + +本文档将展示如何使用 TiDB 和 SQLAlchemy 来构造一个简单的 CRUD 应用程序。 + +> **注意:** +> +> 推荐使用 Python 3.10 及以上版本进行 TiDB 的应用程序的编写。 + +## 第 1 步:启动你的 TiDB 集群 + +本节将介绍 TiDB 集群的启动方法。 + +**使用 TiDB Serverless 集群** + +详细步骤,请参考:[创建 TiDB Serverless 集群](/develop/dev-guide-build-cluster-in-cloud.md#第-1-步创建-tidb-serverless-集群)。 + +**使用本地集群** + +详细步骤,请参考:[部署本地测试 TiDB 集群](/quick-start-with-tidb.md#部署本地测试集群)或[部署正式 TiDB 集群](/production-deployment-using-tiup.md)。 + +## 第 2 步:获取代码 + +```shell +git clone https://github.com/pingcap-inc/tidb-example-python.git +``` + +此处将以 SQLAlchemy **1.4.44** 版本进行说明。 + +```python +import uuid +from typing import List + +from sqlalchemy import create_engine, String, Column, Integer, select, func +from sqlalchemy.orm import declarative_base, sessionmaker + +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +Base = declarative_base() +Base.metadata.create_all(engine) +Session = sessionmaker(bind=engine) + + +class Player(Base): + __tablename__ = "player" + + id = Column(String(36), primary_key=True) + coins = Column(Integer) + goods = Column(Integer) + + def __repr__(self): + return f'Player(id={self.id!r}, coins={self.coins!r}, goods={self.goods!r})' + + +def random_player(amount: int) -> List[Player]: + players = [] + for _ in range(amount): + players.append(Player(id=uuid.uuid4(), coins=10000, goods=10000)) + + return players + + +def simple_example() -> None: + with Session() as session: + # create a player, who has a coin and a goods. + session.add(Player(id="test", coins=1, goods=1)) + + # get this player, and print it. + get_test_stmt = select(Player).where(Player.id == "test") + for player in session.scalars(get_test_stmt): + print(player) + + # create players with bulk inserts. + # insert 1919 players totally, with 114 players per batch. 
+ # each player has a random UUID + player_list = random_player(1919) + for idx in range(0, len(player_list), 114): + session.bulk_save_objects(player_list[idx:idx + 114]) + + # print the number of players + count = session.query(func.count(Player.id)).scalar() + print(f'number of players: {count}') + + # print 3 players. + three_players = session.query(Player).limit(3).all() + for player in three_players: + print(player) + + session.commit() + + +def trade_check(session: Session, sell_id: str, buy_id: str, amount: int, price: int) -> bool: + # sell player goods check + sell_player = session.query(Player.goods).filter(Player.id == sell_id).with_for_update().one() + if sell_player.goods < amount: + print(f'sell player {sell_id} goods not enough') + return False + + # buy player coins check + buy_player = session.query(Player.coins).filter(Player.id == buy_id).with_for_update().one() + if buy_player.coins < price: + print(f'buy player {buy_id} coins not enough') + return False + + +def trade(sell_id: str, buy_id: str, amount: int, price: int) -> None: + with Session() as session: + if trade_check(session, sell_id, buy_id, amount, price) is False: + return + + # deduct the goods of seller, and raise his/her the coins + session.query(Player).filter(Player.id == sell_id). \ + update({'goods': Player.goods - amount, 'coins': Player.coins + price}) + # deduct the coins of buyer, and raise his/her the goods + session.query(Player).filter(Player.id == buy_id). \ + update({'goods': Player.goods + amount, 'coins': Player.coins - price}) + + session.commit() + print("trade success") + + +def trade_example() -> None: + with Session() as session: + # create two players + # player 1: id is "1", has only 100 coins. + # player 2: id is "2", has 114514 coins, and 20 goods. + session.add(Player(id="1", coins=100, goods=0)) + session.add(Player(id="2", coins=114514, goods=20)) + session.commit() + + # player 1 wants to buy 10 goods from player 2. 
+ # it will cost 500 coins, but player 1 cannot afford it. + # so this trade will fail, and nobody will lose their coins or goods + trade(sell_id="2", buy_id="1", amount=10, price=500) + + # then player 1 has to reduce the incoming quantity to 2. + # this trade will be successful + trade(sell_id="2", buy_id="1", amount=2, price=100) + + with Session() as session: + traders = session.query(Player).filter(Player.id.in_(("1", "2"))).all() + for player in traders: + print(player) + session.commit() + + +simple_example() +trade_example() +``` + +相较于直接使用 Driver,SQLAlchemy 屏蔽了创建数据库连接时,不同数据库差异的细节。SQLAlchemy 还封装了大量的操作,如会话管理、基本对象的 CRUD 等,极大地简化了代码量。 + +`Player` 类为数据库表在程序内的映射。`Player` 的每个属性都对应着 `player` 表的一个字段。SQLAlchemy 使用 `Player` 类为了给 SQLAlchemy 提供更多的信息,使用了形如以上示例中的 `id = Column(String(36), primary_key=True)` 的类型定义,用来指示字段类型和其附加属性。`id = Column(String(36), primary_key=True)` 表示 `id` 字段为 `String` 类型,对应数据库类型为 `VARCHAR`,长度为 `36`,且为主键。 + +关于 SQLAlchemy 的更多使用方法,你可以参考 [SQLAlchemy 官网](https://www.sqlalchemy.org/)。 + +## 第 3 步:运行代码 + +本节将逐步介绍代码的运行方法。 + +### 第 3 步第 1 部分:表初始化 + +本示例需手动初始化表,若你使用本地集群,可直接运行: + + + +
+ +```shell +mysql --host 127.0.0.1 --port 4000 -u root < player_init.sql +``` + +
+ +
+ +```shell +mycli --host 127.0.0.1 --port 4000 -u root --no-warn < player_init.sql +``` + +
+ +
+ +若不使用本地集群,或未安装命令行客户端,请用喜欢的方式(如 Navicat、DBeaver 等 GUI 工具)直接登录集群,并运行 `player_init.sql` 文件内的 SQL 语句。 + +### 第 3 步第 2 部分:TiDB Cloud 更改参数 + +若你使用了 TiDB Serverless 集群,此处需使用系统本地的 CA 证书,并将证书路径记为 `` 以供后续指代。你可以参考 [Where is the CA root path on my system?](https://docs.pingcap.com/tidbcloud/secure-connections-to-serverless-tier-clusters#where-is-the-ca-root-path-on-my-system) 文档获取你所使用的操作系统的 CA 证书位置。 + +若你使用 TiDB Serverless 集群,更改 `sqlalchemy_example.py` 内 `create_engine` 函数的入参: + +```python +engine = create_engine('mysql://root:@127.0.0.1:4000/test') +``` + +若你设定的密码为 `123456`,而且从 TiDB Serverless 集群面板中得到的连接信息为: + +- Endpoint: `xxx.tidbcloud.com` +- Port: `4000` +- User: `2aEp24QWEDLqRFs.root` + +那么此处应将 `create_engine` 更改为: + +```python +engine = create_engine('mysql://2aEp24QWEDLqRFs.root:123456@xxx.tidbcloud.com:4000/test', connect_args={ + "ssl_mode": "VERIFY_IDENTITY", + "ssl": { + "ca": "" + } +}) +``` + +### 第 3 步第 3 部分:运行 + +运行前请先安装依赖: + +```bash +pip3 install -r requirement.txt +``` + +当以后需要多次运行脚本时,请在每次运行前先依照[表初始化](#第-3-步第-1-部分表初始化)一节再次进行表初始化。 + +```bash +python3 sqlalchemy_example.py +``` + +## 第 4 步:预期输出 + +[SQLAlchemy 预期输出](https://github.com/pingcap-inc/tidb-example-python/blob/main/Expected-Output.md#SQLAlchemy) diff --git a/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md new file mode 100644 index 00000000..dfed473a --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/TOC.md @@ -0,0 +1,214 @@ + + + +- [TiDB on Kubernetes Docs](https://docs.pingcap.com/tidb-in-kubernetes/dev) +- Introduction + - [Overview](tidb-operator-overview.md) + - [What's New in v1.5](whats-new-in-v1.5.md) +- [Get Started](get-started.md) +- Deploy + - On Self-Managed Kubernetes + - [Prerequisites](prerequisites.md) + - [Configure Storage Class](configure-storage-class.md) + - [Deploy TiDB Operator](deploy-tidb-operator.md) + - [Configure a TiDB 
Cluster](configure-a-tidb-cluster.md) + - [Deploy a TiDB Cluster](deploy-on-general-kubernetes.md) + - [Initialize a TiDB Cluster](initialize-a-cluster.md) + - [Access a TiDB Cluster](access-tidb.md) + - On Public Cloud Kubernetes + - [Amazon EKS](deploy-on-aws-eks.md) + - [Google Cloud GKE](deploy-on-gcp-gke.md) + - [Azure AKS](deploy-on-azure-aks.md) + - [Alibaba Cloud ACK](deploy-on-alibaba-cloud.md) + - [Deploy TiDB on ARM64 Machines](deploy-cluster-on-arm64.md) + - [Deploy TiFlash to Explore TiDB HTAP](deploy-tiflash.md) + - Deploy TiDB Across Multiple Kubernetes Clusters + - [Build Multiple Interconnected AWS EKS Clusters](build-multi-aws-eks.md) + - [Build Multiple Interconnected GKE Clusters](build-multi-gcp-gke.md) + - [Deploy TiDB Across Multiple Kubernetes Clusters](deploy-tidb-cluster-across-multiple-kubernetes.md) + - [Deploy a Heterogeneous TiDB Cluster](deploy-heterogeneous-tidb-cluster.md) + - [Deploy TiCDC](deploy-ticdc.md) + - [Deploy TiDB Binlog](deploy-tidb-binlog.md) +- Monitor and Alert + - [Deploy Monitoring and Alerts for TiDB](monitor-a-tidb-cluster.md) + - [Monitor and Diagnose TiDB Using TiDB Dashboard](access-dashboard.md) + - [Aggregate Monitoring Data of Multiple TiDB Clusters](aggregate-multiple-cluster-monitor-data.md) + - [Monitor a TiDB Cluster across Multiple Kubernetes Clusters](deploy-tidb-monitor-across-multiple-kubernetes.md) + - [Enable Dynamic Configuration for TidbMonitor](enable-monitor-dynamic-configuration.md) + - [Enable Shards for TidbMonitor](enable-monitor-shards.md) +- Migrate + - [Import Data](restore-data-using-tidb-lightning.md) + - Migrate from MySQL + - [Deploy DM](deploy-tidb-dm.md) + - [Migrate to TiDB Using DM](use-tidb-dm.md) + - [Migrate TiDB to Kubernetes](migrate-tidb-to-kubernetes.md) +- Manage + - Secure + - [Enable TLS for the MySQL Client](enable-tls-for-mysql-client.md) + - [Enable TLS between TiDB Components](enable-tls-between-components.md) + - [Enable TLS for DM](enable-tls-for-dm.md) + - 
[Replicate Data to TLS-enabled Downstream Services](enable-tls-for-ticdc-sink.md) + - [Renew and Replace the TLS Certificate](renew-tls-certificate.md) + - [Run Containers as a Non-root User](containers-run-as-non-root-user.md) + - [Scale](scale-a-tidb-cluster.md) + - Upgrade + - [Upgrade a TiDB Cluster](upgrade-a-tidb-cluster.md) + - Upgrade TiDB Operator + - [Normal Upgrade](upgrade-tidb-operator.md) + - [Canary Upgrade](canary-upgrade-tidb-operator.md) + - Backup and Restore + - [Overview](backup-restore-overview.md) + - [Backup and Restore Custom Resources](backup-restore-cr.md) + - [Grant Permissions to Remote Storage](grant-permissions-to-remote-storage.md) + - Amazon S3 Compatible Storage + - [Back Up Data Using BR](backup-to-aws-s3-using-br.md) + - [Restore Data Using BR](restore-from-aws-s3-using-br.md) + - [Back Up Data Using Dumpling](backup-to-s3.md) + - [Restore Data Using TiDB Lightning](restore-from-s3.md) + - Google Cloud Storage + - [Back Up Data Using BR](backup-to-gcs-using-br.md) + - [Restore Data Using BR](restore-from-gcs-using-br.md) + - [Back Up Data Using Dumpling](backup-to-gcs.md) + - [Restore Data Using TiDB Lightning](restore-from-gcs.md) + - Azure Blob Storage + - [Back Up Data Using BR](backup-to-azblob-using-br.md) + - [Restore Data Using BR](restore-from-azblob-using-br.md) + - Persistent Volumes + - [Back Up Data](backup-to-pv-using-br.md) + - [Restore Data](restore-from-pv-using-br.md) + - Snapshot Backup and Restore + - [Architecture](volume-snapshot-backup-restore.md) + - [Back Up Data Using EBS Snapshots](backup-to-aws-s3-by-snapshot.md) + - [Restore Data from EBS Snapshots](restore-from-aws-s3-by-snapshot.md) + - [Backup and Restore Performance](backup-restore-snapshot-perf.md) + - [FAQs](backup-restore-faq.md) + - Maintain + - [Restart a TiDB Cluster](restart-a-tidb-cluster.md) + - [Destroy a TiDB Cluster](destroy-a-tidb-cluster.md) + - [View TiDB Logs](view-logs.md) + - [Modify TiDB Cluster 
Configuration](modify-tidb-configuration.md) + - [Configure Automatic Failover](use-auto-failover.md) + - [Pause Sync of a TiDB Cluster](pause-sync-of-tidb-cluster.md) + - [Suspend a TiDB Cluster](suspend-tidb-cluster.md) + - [Maintain Different TiDB Clusters Separately Using Multiple TiDB Operator](deploy-multiple-tidb-operator.md) + - [Maintain Kubernetes Nodes](maintain-a-kubernetes-node.md) + - [Migrate from Helm 2 to Helm 3](migrate-to-helm3.md) + - Replace Nodes for a TiDB Cluster + - [Replace Nodes on Cloud Disks](replace-nodes-for-cloud-disk.md) + - [Replace Nodes on Local Disks](replace-nodes-for-local-disk.md) + - Disaster Recovery + - [Recover a Deleted TiDB Cluster](recover-deleted-cluster.md) + - [Recover a PD Cluster](pd-recover.md) +- Troubleshoot + - [Troubleshooting Tips](tips.md) + - [Deployment Failures](deploy-failures.md) + - [Cluster Exceptions](exceptions.md) + - [Network Issues](network-issues.md) + - [Troubleshoot TiDB Cluster Using PingCAP Clinic](clinic-user-guide.md) +- [FAQs](faq.md) +- Reference + - Architecture + - [TiDB Operator](architecture.md) + - [TiDB Scheduler](tidb-scheduler.md) + - [Advanced StatefulSet Controller](advanced-statefulset.md) + - [Admission Controller](enable-admission-webhook.md) + - [Sysbench Performance Test](benchmark-sysbench.md) + - [API References](https://github.com/pingcap/tidb-operator/blob/master/docs/api-references/docs.md) + - [Cheat Sheet](cheat-sheet.md) + - [Required RBAC Rules](tidb-operator-rbac.md) + - Tools + - [tkctl](use-tkctl.md) + - [TiDB Toolkit](tidb-toolkit.md) + - Configure + - [Configure tidb-drainer Chart](configure-tidb-binlog-drainer.md) + - [Log Collection](logs-collection.md) + - [Monitoring and Alert on Kubernetes](monitor-kubernetes.md) + - [PingCAP Clinic Diagnostic Data](clinic-data-collection.md) +- Release Notes + - v1.5 + - [1.5 GA](releases/release-1.5.0.md) + - [1.5.0-beta.1](releases/release-1.5.0-beta.1.md) + - v1.4 + - [1.4.5](releases/release-1.4.5.md) + - 
[1.4.4](releases/release-1.4.4.md) + - [1.4.3](releases/release-1.4.3.md) + - [1.4.2](releases/release-1.4.2.md) + - [1.4.1](releases/release-1.4.1.md) + - [1.4 GA](releases/release-1.4.0.md) + - [1.4.0-beta.3](releases/release-1.4.0-beta.3.md) + - [1.4.0-beta.2](releases/release-1.4.0-beta.2.md) + - [1.4.0-beta.1](releases/release-1.4.0-beta.1.md) + - [1.4.0-alpha.1](releases/release-1.4.0-alpha.1.md) + - v1.3 + - [1.3.10](releases/release-1.3.10.md) + - [1.3.9](releases/release-1.3.9.md) + - [1.3.8](releases/release-1.3.8.md) + - [1.3.7](releases/release-1.3.7.md) + - [1.3.6](releases/release-1.3.6.md) + - [1.3.5](releases/release-1.3.5.md) + - [1.3.4](releases/release-1.3.4.md) + - [1.3.3](releases/release-1.3.3.md) + - [1.3.2](releases/release-1.3.2.md) + - [1.3.1](releases/release-1.3.1.md) + - [1.3 GA](releases/release-1.3.0.md) + - [1.3.0-beta.1](releases/release-1.3.0-beta.1.md) + - v1.2 + - [1.2.7](releases/release-1.2.7.md) + - [1.2.6](releases/release-1.2.6.md) + - [1.2.5](releases/release-1.2.5.md) + - [1.2.4](releases/release-1.2.4.md) + - [1.2.3](releases/release-1.2.3.md) + - [1.2.2](releases/release-1.2.2.md) + - [1.2.1](releases/release-1.2.1.md) + - [1.2 GA](releases/release-1.2.0.md) + - [1.2.0-rc.2](releases/release-1.2.0-rc.2.md) + - [1.2.0-rc.1](releases/release-1.2.0-rc.1.md) + - [1.2.0-beta.2](releases/release-1.2.0-beta.2.md) + - [1.2.0-beta.1](releases/release-1.2.0-beta.1.md) + - [1.2.0-alpha.1](releases/release-1.2.0-alpha.1.md) + - v1.1 + - [1.1.15](releases/release-1.1.15.md) + - [1.1.14](releases/release-1.1.14.md) + - [1.1.13](releases/release-1.1.13.md) + - [1.1.12](releases/release-1.1.12.md) + - [1.1.11](releases/release-1.1.11.md) + - [1.1.10](releases/release-1.1.10.md) + - [1.1.9](releases/release-1.1.9.md) + - [1.1.8](releases/release-1.1.8.md) + - [1.1.7](releases/release-1.1.7.md) + - [1.1.6](releases/release-1.1.6.md) + - [1.1.5](releases/release-1.1.5.md) + - [1.1.4](releases/release-1.1.4.md) + - 
[1.1.3](releases/release-1.1.3.md) + - [1.1.2](releases/release-1.1.2.md) + - [1.1.1](releases/release-1.1.1.md) + - [1.1 GA](releases/release-1.1-ga.md) + - [1.1.0-rc.4](releases/release-1.1.0-rc.4.md) + - [1.1.0-rc.3](releases/release-1.1.0-rc.3.md) + - [1.1.0-rc.2](releases/release-1.1.0-rc.2.md) + - [1.1.0-rc.1](releases/release-1.1.0-rc.1.md) + - [1.1.0-beta.2](releases/release-1.1.0-beta.2.md) + - [1.1.0-beta.1](releases/release-1.1.0-beta.1.md) + - v1.0 + - [1.0.7](releases/release-1.0.7.md) + - [1.0.6](releases/release-1.0.6.md) + - [1.0.5](releases/release-1.0.5.md) + - [1.0.4](releases/release-1.0.4.md) + - [1.0.3](releases/release-1.0.3.md) + - [1.0.2](releases/release-1.0.2.md) + - [1.0.1](releases/release-1.0.1.md) + - [1.0 GA](releases/release-1.0-ga.md) + - [1.0.0-rc.1](releases/release-1.0.0-rc.1.md) + - [1.0.0-beta.3](releases/release-1.0.0-beta.3.md) + - [1.0.0-beta.2](releases/release-1.0.0-beta.2.md) + - [1.0.0-beta.1-p2](releases/release-1.0.0-beta.1-p2.md) + - [1.0.0-beta.1-p1](releases/release-1.0.0-beta.1-p1.md) + - [1.0.0-beta.1](releases/release-1.0.0-beta.1.md) + - [1.0.0-beta.0](releases/release-1.0.0-beta.0.md) + - v0 + - [0.4.0](releases/release-0.4.0.md) + - [0.3.1](releases/release-0.3.1.md) + - [0.3.0](releases/release-0.3.0.md) + - [0.2.1](releases/release-0.2.1.md) + - [0.2.0](releases/release-0.2.0.md) + - [0.1.0](releases/release-0.1.0.md) diff --git a/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md new file mode 100644 index 00000000..2e660799 --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md @@ -0,0 +1,207 @@ +--- +title: Grant Permissions to Remote Storage +summary: Learn how to grant permissions to access remote storage for backup and restore. 
+--- + +# Grant Permissions to Remote Storage + +This document describes how to grant permissions to access remote storage for backup and restore. During the backup process, TiDB cluster data is backed up to the remote storage. During the restore process, the backup data is restored from the remote storage to the TiDB cluster. + +## AWS account permissions + +Amazon Web Service (AWS) provides different methods to grant permissions for different types of Kubernetes clusters. This document describes the following three methods. + +### Grant permissions by AccessKey and SecretKey + +The AWS client can read `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` from the process environment variables to obtain the associated user or role permissions. + +Create the `s3-secret` secret by running the following command. Use the AWS account's AccessKey and SecretKey. The secret stores the credential used for accessing S3-compatible storage. + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic s3-secret --from-literal=access_key=xxx --from-literal=secret_key=yyy --namespace=test1 +``` + +### Grant permissions by associating IAM with Pod + +If you associate the user's [IAM](https://aws.amazon.com/cn/iam/) role with the resources of the running Pods, the processes running in the Pods can have the permissions of the role. This method is provided by [`kube2iam`](https://github.com/jtblin/kube2iam). + +> **Note:** +> +> - When you use this method to grant permissions, you can [create the `kube2iam` environment](https://github.com/jtblin/kube2iam#usage) in the Kubernetes cluster and deploy TiDB Operator and the TiDB cluster. +> - This method is not applicable to the [`hostNetwork`](https://kubernetes.io/docs/concepts/policy/pod-security-policy) mode. Make sure the value of `spec.tikv.hostNetwork` is set to `false`. + +1. Create an IAM role. + + First, [create an IAM User](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html) for your account. 
+ + Then, Give the required permission to the IAM role you have created. Refer to [Adding and Removing IAM Identity Permissions](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html) for details. + + Because the `Backup` CR needs to access the Amazon S3 storage, the IAM role is granted the `AmazonS3FullAccess` permission. + + When backing up a TiDB cluster using EBS volume snapshots, besides the `AmazonS3FullAccess` permission, the following permissions are also required: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + +2. Associate IAM with the TiKV Pod: + + When you use BR to back up TiDB data, the TiKV Pod also needs to perform read and write operations on S3-compatible storage as the BR Pod does. Therefore, you need to add annotations to the TiKV Pod to associate it with the IAM role. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"annotations":{"iam.amazonaws.com/role":"arn:aws:iam::123456789012:role/user"}}}}' + ``` + + After the TiKV Pod is restarted, check whether the Pod has the annotation. + +> **Note:** +> +> `arn:aws:iam::123456789012:role/user` is the IAM role created in Step 1. + +### Grant permissions by associating IAM with ServiceAccount + +If you associate the user's [IAM](https://aws.amazon.com/cn/iam/) role with [`serviceAccount`](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) of Kubernetes, the Pods using the `serviceAccount` can have the permissions of the role. 
This method is provided by [`EKS Pod Identity Webhook`](https://github.com/aws/amazon-eks-pod-identity-webhook). + +When you use this method to grant permissions, you can [create the EKS cluster](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-cluster.html) and deploy TiDB Operator and the TiDB cluster. + +1. Enable the IAM role for the `serviceAccount` in the cluster: + + Refer to [AWS documentation](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html). + +2. Create the IAM role: + + [Create an IAM role](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html) and grant the `AmazonS3FullAccess` permissions to the role. Edit the role's `Trust relationships` to grant tidb-backup-manager the access to this IAM role. + + When backing up a TiDB cluster using EBS volume snapshots, besides the `AmazonS3FullAccess` permission, the following permissions are also required: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + + At the same time, edit the role's `Trust relationships` to grant tidb-controller-manager the access to this IAM role. + +3. Associate the IAM role with the `ServiceAccount` resources. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl annotate sa tidb-backup-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=test1 + ``` + + When backing up or restoring a TiDB cluster using EBS volume snapshots, you need to associate the IAM role with the `ServiceAccount` resources of tidb-controller-manager. 
+ + ```shell + kubectl annotate sa tidb-controller-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=tidb-admin + ``` + + Restart the tidb-controller-manager Pod of TiDB Operator to make the configured `ServiceAccount` take effect. + +4. Associate the `ServiceAccount` with the TiKV Pod: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"serviceAccount": "tidb-backup-manager"}}}' + ``` + + Modify the value of `spec.tikv.serviceAccount` to `tidb-backup-manager`. After the TiKV Pod is restarted, check whether the Pod's `serviceAccountName` is changed. + +> **Note:** +> +> `arn:aws:iam::123456789012:role/user` is the IAM role created in Step 2. + +## GCS account permissions + +### Grant permissions by the service account + +Create the `gcs-secret` secret which stores the credential used to access GCS. The `google-credentials.json` file stores the service account key that you have downloaded from the Google Cloud console. Refer to [Google Cloud documentation](https://cloud.google.com/docs/authentication/getting-started) for details. + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic gcs-secret --from-file=credentials=./google-credentials.json -n test1 +``` + +## Azure account permissions + +Azure provides different methods to grant permissions for different types of Kubernetes clusters. This document describes the following two methods. + +### Grant permissions by access key + +The Azure client can read `AZURE_STORAGE_ACCOUNT` and `AZURE_STORAGE_KEY` from the process environment variables to obtain the associated user or role permissions. + +Run the following command to create the `azblob-secret` secret and use your Azure account access key to grant permissions. The secret stores the credential used for accessing Azure Blob Storage. 
+ +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic azblob-secret --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_STORAGE_KEY=yyy --namespace=test1 +``` + +### Grant permissions by Azure AD + +The Azure client can read `AZURE_STORAGE_ACCOUNT`, `AZURE_CLIENT_ID`, `AZURE_TENANT_ID`, and `AZURE_CLIENT_SECRET` to obtain the associated user or role permissions. + +1. Create the `azblob-secret-ad` secret by running the following command. Use the Active Directory (AD) of your Azure account. The secret stores the credential used for accessing Azure Blob Storage. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl create secret generic azblob-secret-ad --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_CLIENT_ID=yyy --from-literal=AZURE_TENANT_ID=zzz --from-literal=AZURE_CLIENT_SECRET=aaa --namespace=test1 + ``` + +2. Associate the secret with the TiKV Pod: + + When you use BR to back up TiDB data, the TiKV Pod also needs to perform read and write operations on Azure Blob Storage as the BR Pod does. Therefore, you need to associate the TiKV Pod with the secret. + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"envFrom":[{"secretRef":{"name":"azblob-secret-ad"}}]}}}' + ``` + + After the TiKV Pod is restarted, check whether the Pod has the environment variables. diff --git a/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md new file mode 100644 index 00000000..5034c822 --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/releases/release-1.5.0.md @@ -0,0 +1,41 @@ +--- +title: TiDB Operator 1.5.0 Release Notes +summary: Learn about new features, improvements, and bug fixes in TiDB Operator 1.5.0. 
+--- + +# TiDB Operator 1.5.0 Release Notes + +Release date: August 4, 2023 + +TiDB Operator version: 1.5.0 + +## Rolling update changes + +If TiFlash is deployed in a TiDB cluster that is v7.1.0 or later, the TiFlash component will be rolling updated after TiDB Operator is upgraded to v1.5.0 due to [#5075](https://github.com/pingcap/tidb-operator/pull/5075). + +## New features + +- Add the BR Federation Manager component to orchestrate `Backup` and `Restore` custom resources (CR) across multiple Kubernetes clusters ([#4996](https://github.com/pingcap/tidb-operator/pull/4996), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support using the `VolumeBackup` CR to back up a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5013](https://github.com/pingcap/tidb-operator/pull/5013), [@WangLe1321](https://github.com/WangLe1321)) +- Support using the `VolumeRestore` CR to restore a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5039](https://github.com/pingcap/tidb-operator/pull/5039), [@WangLe1321](https://github.com/WangLe1321)) +- Support using the `VolumeBackupSchedule` CR to automatically back up a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots ([#5036](https://github.com/pingcap/tidb-operator/pull/5036), [@BornChanger](https://github.com/BornChanger)) +- Support backing up CRs related to `TidbCluster` when backing up a TiDB cluster deployed across multiple Kubernetes based on EBS snapshots ([#5207](https://github.com/pingcap/tidb-operator/pull/5207), [@WangLe1321](https://github.com/WangLe1321)) + +## Improvements + +- Add the `startUpScriptVersion` field for DM master to specify the version of the startup script ([#4971](https://github.com/pingcap/tidb-operator/pull/4971), [@hanlins](https://github.com/hanlins)) +- Support `spec.preferIPv6` for DmCluster, TidbDashboard, TidbMonitor, and TidbNGMonitoring 
([#4977](https://github.com/pingcap/tidb-operator/pull/4977), [@KanShiori](https://github.com/KanShiori)) +- Support setting expiration time for TiKV leader eviction and PD leader transfer ([#4997](https://github.com/pingcap/tidb-operator/pull/4997), [@Tema](https://github.com/Tema)) +- Support setting toleration for `TidbInitializer` ([#5047](https://github.com/pingcap/tidb-operator/pull/5047), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support configuring the timeout for PD start ([#5071](https://github.com/pingcap/tidb-operator/pull/5071), [@oliviachenairbnb](https://github.com/oliviachenairbnb)) +- Skip evicting leaders for TiKV when changing PVC size to avoid leader eviction blocked caused by low disk space ([#5101](https://github.com/pingcap/tidb-operator/pull/5101), [@csuzhangxc](https://github.com/csuzhangxc)) +- Support updating annotations and labels in services for PD, TiKV, TiFlash, TiProxy, DM-master, and DM-worker ([#4973](https://github.com/pingcap/tidb-operator/pull/4973), [@wxiaomou](https://github.com/wxiaomou)) +- Enable volume resizing by default for PV expansion ([#5167](https://github.com/pingcap/tidb-operator/pull/5167), [@liubog2008](https://github.com/liubog2008)) + +## Bug fixes + +- Fix the quorum loss issue during TiKV upgrade due to some TiKV stores going down ([#4979](https://github.com/pingcap/tidb-operator/pull/4979), [@Tema](https://github.com/Tema)) +- Fix the quorum loss issue during PD upgrade due to some members going down ([#4995](https://github.com/pingcap/tidb-operator/pull/4995), [@Tema](https://github.com/Tema)) +- Fix the issue that TiDB Operator panics when no Kubernetes cluster-level permission is configured ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- Fix the issue that TiDB Operator might panic when `AdditionalVolumeMounts` is set for the `TidbCluster` CR ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), 
[@liubog2008](https://github.com/liubog2008)) +- Fix the issue that `baseImage` for the `TidbDashboard` CR is parsed incorrectly when custom image registry is used ([#5014](https://github.com/pingcap/tidb-operator/pull/5014), [@linkinghack](https://github.com/linkinghack)) diff --git a/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md new file mode 100644 index 00000000..35b2d76e --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/tidb-operator-overview.md @@ -0,0 +1,69 @@ +--- +title: TiDB Operator Overview +summary: Learn the overview of TiDB Operator. +aliases: ['/docs/tidb-in-kubernetes/dev/tidb-operator-overview/'] +--- + +# TiDB Operator Overview + +[TiDB Operator](https://github.com/pingcap/tidb-operator) is an automatic operation system for TiDB clusters on Kubernetes. It provides a full management life-cycle for TiDB including deployment, upgrades, scaling, backup, fail-over, and configuration changes. With TiDB Operator, TiDB can run seamlessly in the Kubernetes clusters deployed on a public cloud or in a self-hosted environment. 
+ +The corresponding relationship between TiDB Operator and TiDB versions is as follows: + +| TiDB versions | Compatible TiDB Operator versions | +|:---|:---| +| dev | dev | +| TiDB >= 7.1 | 1.5 (Recommended), 1.4 | +| 6.5 <= TiDB < 7.1 | 1.5, 1.4 (Recommended), 1.3 | +| 5.4 <= TiDB < 6.5 | 1.4, 1.3 (Recommended) | +| 5.1 <= TiDB < 5.4 | 1.4, 1.3 (Recommended), 1.2 | +| 3.0 <= TiDB < 5.1 | 1.4, 1.3 (Recommended), 1.2, 1.1 | +| 2.1 <= TiDB < v3.0| 1.0 (End of support) | + +## Manage TiDB clusters using TiDB Operator + +TiDB Operator provides several ways to deploy TiDB clusters on Kubernetes: + ++ For test environment: + + - [Get Started](get-started.md) using kind, Minikube, or the Google Cloud Shell + ++ For production environment: + + + On public cloud: + - [Deploy TiDB on AWS EKS](deploy-on-aws-eks.md) + - [Deploy TiDB on Google Cloud GKE](deploy-on-gcp-gke.md) + - [Deploy TiDB on Azure AKS](deploy-on-azure-aks.md) + - [Deploy TiDB on Alibaba Cloud ACK](deploy-on-alibaba-cloud.md) + + - In an existing Kubernetes cluster: + + First install TiDB Operator on a Kubernetes cluster according to [Deploy TiDB Operator on Kubernetes](deploy-tidb-operator.md), then deploy your TiDB clusters according to [Deploy TiDB on General Kubernetes](deploy-on-general-kubernetes.md). + + You also need to adjust the configuration of the Kubernetes cluster based on [Prerequisites for TiDB on Kubernetes](prerequisites.md) and configure the local PV for your Kubernetes cluster to achieve low latency of local storage for TiKV according to [Local PV Configuration](configure-storage-class.md#local-pv-configuration). + +Before deploying TiDB on any of the above two environments, you can always refer to [TiDB Cluster Configuration Document](configure-a-tidb-cluster.md) to customize TiDB configurations. 
+ +After the deployment is complete, see the following documents to use, operate, and maintain TiDB clusters on Kubernetes: + ++ [Access the TiDB Cluster](access-tidb.md) ++ [Scale TiDB Cluster](scale-a-tidb-cluster.md) ++ [Upgrade a TiDB Cluster](upgrade-a-tidb-cluster.md) ++ [Change the Configuration of TiDB Cluster](configure-a-tidb-cluster.md) ++ [Back up and Restore a TiDB Cluster](backup-restore-overview.md) ++ [Automatic Failover](use-auto-failover.md) ++ [Monitor a TiDB Cluster on Kubernetes](monitor-a-tidb-cluster.md) ++ [View TiDB Logs on Kubernetes](view-logs.md) ++ [Maintain Kubernetes Nodes that Hold the TiDB Cluster](maintain-a-kubernetes-node.md) + +When a problem occurs and the cluster needs diagnosis, you can: + ++ See [TiDB FAQs on Kubernetes](faq.md) for any available solution; ++ See [Troubleshoot TiDB on Kubernetes](tips.md) to shoot troubles. + +TiDB on Kubernetes provides a dedicated command-line tool `tkctl` for cluster management and auxiliary diagnostics. Meanwhile, some of TiDB's tools are used differently on Kubernetes. You can: + ++ Use `tkctl` according to [`tkctl` Guide](use-tkctl.md); ++ See [Tools on Kubernetes](tidb-toolkit.md) to understand how TiDB tools are used on Kubernetes. + +Finally, when a new version of TiDB Operator is released, you can refer to [Upgrade TiDB Operator](upgrade-tidb-operator.md) to upgrade to the latest version. diff --git a/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md new file mode 100644 index 00000000..e0e4ddce --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/en/tidb-in-kubernetes/master/whats-new-in-v1.5.md @@ -0,0 +1,31 @@ +--- +title: What's New in TiDB Operator 1.5 +summary: Learn about new features in TiDB Operator 1.5.0. 
+--- + +# What's New in TiDB Operator 1.5 + +TiDB Operator 1.5 introduces the following key features, which helps you manage TiDB clusters and the tools more easily in terms of extensibility and usability. + +## Compatibility changes + +To use the `PreferDualStack` feature (enabled with `spec.preferIPv6: true`) introduced in [#4959](https://github.com/pingcap/tidb-operator/pull/4959), Kubernetes version >= v1.20 is required. + +## Rolling update changes + +If TiFlash is deployed in a TiDB cluster that is v7.1.0 or later, the TiFlash component will be rolling updated after TiDB Operator is upgraded to v1.5.0 due to [#5075](https://github.com/pingcap/tidb-operator/pull/5075). + +## Extensibility + +- Support specifying an initialization SQL file to be executed during the first bootstrap of TiDB with the `bootstrapSQLConfigMapName` field. +- Support setting `PreferDualStack` for all Service's `ipFamilyPolicy` with `spec.preferIPv6: true`. +- Support managing TiCDC and TiProxy with [Advanced StatefulSet](advanced-statefulset.md). +- Add the BR Federation Manager component to support the backup and restore of a TiDB cluster deployed across multiple Kubernetes clusters based on EBS snapshots. + +## Usability + +- Support using the `tidb.pingcap.com/pd-transfer-leader` annotation to restart PD Pods gracefully. +- Support using the `tidb.pingcap.com/tidb-graceful-shutdown` annotation to restart TiDB Pods gracefully. +- Allow users to define a strategy to restart failed backup jobs, enhancing backup stability. +- Add metrics for the reconciler and worker queue to improve observability. +- Add metrics for counting errors that occur during the reconciliation to improve observability. 
diff --git a/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md new file mode 100644 index 00000000..a26a5e4d --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/TOC.md @@ -0,0 +1,214 @@ + + + +- [TiDB on Kubernetes 文档](https://docs.pingcap.com/zh/tidb-in-kubernetes/dev) +- 关于 TiDB Operator + - [简介](tidb-operator-overview.md) + - [v1.5 新特性](whats-new-in-v1.5.md) +- [快速上手](get-started.md) +- 部署 + - 自托管的 Kubernetes + - [集群环境要求](prerequisites.md) + - [配置 Storage Class](configure-storage-class.md) + - [部署 TiDB Operator](deploy-tidb-operator.md) + - [配置 TiDB 集群](configure-a-tidb-cluster.md) + - [部署 TiDB 集群](deploy-on-general-kubernetes.md) + - [初始化 TiDB 集群](initialize-a-cluster.md) + - [访问 TiDB 集群](access-tidb.md) + - 公有云的 Kubernetes + - [Amazon EKS](deploy-on-aws-eks.md) + - [Google Cloud GKE](deploy-on-gcp-gke.md) + - [Azure AKS](deploy-on-azure-aks.md) + - [阿里云 ACK](deploy-on-alibaba-cloud.md) + - [在 ARM64 机器上部署 TiDB 集群](deploy-cluster-on-arm64.md) + - [部署 TiDB HTAP 存储引擎 TiFlash](deploy-tiflash.md) + - 跨多个 Kubernetes 集群部署 TiDB 集群 + - [构建多个网络互通的 AWS EKS 集群](build-multi-aws-eks.md) + - [构建多个网络互通的 GKE 集群](build-multi-gcp-gke.md) + - [跨多个 Kubernetes 集群部署 TiDB 集群](deploy-tidb-cluster-across-multiple-kubernetes.md) + - [部署 TiDB 异构集群](deploy-heterogeneous-tidb-cluster.md) + - [部署增量数据同步工具 TiCDC](deploy-ticdc.md) + - [部署 Binlog 收集工具](deploy-tidb-binlog.md) +- 监控与告警 + - [部署 TiDB 集群监控与告警](monitor-a-tidb-cluster.md) + - [使用 TiDB Dashboard 监控诊断 TiDB 集群](access-dashboard.md) + - [聚合多个 TiDB 集群的监控数据](aggregate-multiple-cluster-monitor-data.md) + - [跨多个 Kubernetes 集群监控 TiDB 集群](deploy-tidb-monitor-across-multiple-kubernetes.md) + - [开启 TidbMonitor 动态配置](enable-monitor-dynamic-configuration.md) + - [开启 TidbMonitor 分片功能](enable-monitor-shards.md) +- 数据迁移 + - [导入集群数据](restore-data-using-tidb-lightning.md) + - 从 MySQL 迁移 + - [部署 
DM](deploy-tidb-dm.md) + - [使用 DM 迁移 MySQL 数据到 TiDB 集群](use-tidb-dm.md) + - [迁移 TiDB 至 Kubernetes](migrate-tidb-to-kubernetes.md) +- 运维管理 + - 安全 + - [为 MySQL 客户端开启 TLS](enable-tls-for-mysql-client.md) + - [为 TiDB 组件间开启 TLS](enable-tls-between-components.md) + - [为 DM 组件开启 TLS](enable-tls-for-dm.md) + - [同步数据到开启 TLS 的下游服务](enable-tls-for-ticdc-sink.md) + - [更新和替换 TLS 证书](renew-tls-certificate.md) + - [以非 root 用户运行](containers-run-as-non-root-user.md) + - [扩缩容](scale-a-tidb-cluster.md) + - 升级 + - [升级 TiDB 集群](upgrade-a-tidb-cluster.md) + - 升级 TiDB Operator + - [常规升级](upgrade-tidb-operator.md) + - [灰度升级](canary-upgrade-tidb-operator.md) + - 备份与恢复 + - [备份与恢复简介](backup-restore-overview.md) + - [备份与恢复 CR 介绍](backup-restore-cr.md) + - [远程存储访问授权](grant-permissions-to-remote-storage.md) + - 使用 Amazon S3 兼容的存储 + - [使用 BR 备份 TiDB 集群数据到兼容 S3 的存储](backup-to-aws-s3-using-br.md) + - [使用 BR 恢复 S3 兼容存储上的备份数据](restore-from-aws-s3-using-br.md) + - [使用 Dumpling 备份 TiDB 集群数据到兼容 S3 的存储](backup-to-s3.md) + - [使用 TiDB Lightning 恢复 S3 兼容存储上的备份数据](restore-from-s3.md) + - 使用 Google Cloud Storage + - [使用 BR 备份 TiDB 集群数据到 GCS](backup-to-gcs-using-br.md) + - [使用 BR 恢复 GCS 上的备份数据](restore-from-gcs-using-br.md) + - [使用 Dumpling 备份 TiDB 集群数据到 GCS](backup-to-gcs.md) + - [使用 TiDB Lightning 恢复 GCS 上的备份数据](restore-from-gcs.md) + - 使用 Azure Blob Storage + - [使用 BR 备份 TiDB 集群数据到 Azblob](backup-to-azblob-using-br.md) + - [使用 BR 恢复 Azblob 上的备份数据](restore-from-azblob-using-br.md) + - 使用持久卷 + - [使用 BR 备份 TiDB 集群数据到持久卷](backup-to-pv-using-br.md) + - [使用 BR 恢复持久卷上的备份数据](restore-from-pv-using-br.md) + - 基于快照的备份和恢复 + - [功能架构](volume-snapshot-backup-restore.md) + - [基于 EBS 快照备份 TiDB 集群](backup-to-aws-s3-by-snapshot.md) + - [基于 EBS 快照恢复 TiDB 集群](restore-from-aws-s3-by-snapshot.md) + - [基于 EBS 卷快照备份恢复的性能介绍](backup-restore-snapshot-perf.md) + - [基于 EBS 快照备份恢复的常见问题](backup-restore-faq.md) + - 运维 + - [重启 TiDB 集群](restart-a-tidb-cluster.md) + - [销毁 TiDB 集群](destroy-a-tidb-cluster.md) + - [查看 TiDB 日志](view-logs.md) + - 
[修改 TiDB 集群配置](modify-tidb-configuration.md) + - [配置集群故障自动转移](use-auto-failover.md) + - [暂停 TiDB 集群同步](pause-sync-of-tidb-cluster.md) + - [挂起 TiDB 集群](suspend-tidb-cluster.md) + - [使用多套 TiDB Operator 单独管理不同的 TiDB 集群](deploy-multiple-tidb-operator.md) + - [维护 TiDB 集群所在的 Kubernetes 节点](maintain-a-kubernetes-node.md) + - [从 Helm 2 迁移到 Helm 3](migrate-to-helm3.md) + - 为 TiDB 集群更换节点 + - [更换云存储节点](replace-nodes-for-cloud-disk.md) + - [更换本地存储节点](replace-nodes-for-local-disk.md) + - 灾难恢复 + - [恢复误删的 TiDB 集群](recover-deleted-cluster.md) + - [恢复 PD 集群](pd-recover.md) +- 故障诊断 + - [使用技巧](tips.md) + - [部署错误](deploy-failures.md) + - [集群异常](exceptions.md) + - [网络问题](network-issues.md) + - [使用 PingCAP Clinic 诊断 TiDB 集群](clinic-user-guide.md) +- [常见问题](faq.md) +- 参考 + - 架构 + - [TiDB Operator 架构](architecture.md) + - [TiDB Scheduler 扩展调度器](tidb-scheduler.md) + - [增强型 StatefulSet 控制器](advanced-statefulset.md) + - [准入控制器](enable-admission-webhook.md) + - [Sysbench 性能测试](benchmark-sysbench.md) + - [API 参考文档](https://github.com/pingcap/tidb-operator/blob/master/docs/api-references/docs.md) + - [Cheat Sheet](cheat-sheet.md) + - [TiDB Operator RBAC 规则](tidb-operator-rbac.md) + - 工具 + - [tkctl](use-tkctl.md) + - [TiDB Toolkit](tidb-toolkit.md) + - 配置 + - [tidb-drainer chart 配置](configure-tidb-binlog-drainer.md) + - [日志收集](logs-collection.md) + - [Kubernetes 监控与告警](monitor-kubernetes.md) + - [PingCAP Clinic 数据采集范围说明](clinic-data-collection.md) +- 版本发布历史 + - v1.5 + - [1.5 GA](releases/release-1.5.0.md) + - [1.5.0-beta.1](releases/release-1.5.0-beta.1.md) + - v1.4 + - [1.4.5](releases/release-1.4.5.md) + - [1.4.4](releases/release-1.4.4.md) + - [1.4.3](releases/release-1.4.3.md) + - [1.4.2](releases/release-1.4.2.md) + - [1.4.1](releases/release-1.4.1.md) + - [1.4 GA](releases/release-1.4.0.md) + - [1.4.0-beta.3](releases/release-1.4.0-beta.3.md) + - [1.4.0-beta.2](releases/release-1.4.0-beta.2.md) + - [1.4.0-beta.1](releases/release-1.4.0-beta.1.md) + - 
[1.4.0-alpha.1](releases/release-1.4.0-alpha.1.md) + - v1.3 + - [1.3.10](releases/release-1.3.10.md) + - [1.3.9](releases/release-1.3.9.md) + - [1.3.8](releases/release-1.3.8.md) + - [1.3.7](releases/release-1.3.7.md) + - [1.3.6](releases/release-1.3.6.md) + - [1.3.5](releases/release-1.3.5.md) + - [1.3.4](releases/release-1.3.4.md) + - [1.3.3](releases/release-1.3.3.md) + - [1.3.2](releases/release-1.3.2.md) + - [1.3.1](releases/release-1.3.1.md) + - [1.3 GA](releases/release-1.3.0.md) + - [1.3.0-beta.1](releases/release-1.3.0-beta.1.md) + - v1.2 + - [1.2.7](releases/release-1.2.7.md) + - [1.2.6](releases/release-1.2.6.md) + - [1.2.5](releases/release-1.2.5.md) + - [1.2.4](releases/release-1.2.4.md) + - [1.2.3](releases/release-1.2.3.md) + - [1.2.2](releases/release-1.2.2.md) + - [1.2.1](releases/release-1.2.1.md) + - [1.2 GA](releases/release-1.2.0.md) + - [1.2.0-rc.2](releases/release-1.2.0-rc.2.md) + - [1.2.0-rc.1](releases/release-1.2.0-rc.1.md) + - [1.2.0-beta.2](releases/release-1.2.0-beta.2.md) + - [1.2.0-beta.1](releases/release-1.2.0-beta.1.md) + - [1.2.0-alpha.1](releases/release-1.2.0-alpha.1.md) + - v1.1 + - [1.1.15](releases/release-1.1.15.md) + - [1.1.14](releases/release-1.1.14.md) + - [1.1.13](releases/release-1.1.13.md) + - [1.1.12](releases/release-1.1.12.md) + - [1.1.11](releases/release-1.1.11.md) + - [1.1.10](releases/release-1.1.10.md) + - [1.1.9](releases/release-1.1.9.md) + - [1.1.8](releases/release-1.1.8.md) + - [1.1.7](releases/release-1.1.7.md) + - [1.1.6](releases/release-1.1.6.md) + - [1.1.5](releases/release-1.1.5.md) + - [1.1.4](releases/release-1.1.4.md) + - [1.1.3](releases/release-1.1.3.md) + - [1.1.2](releases/release-1.1.2.md) + - [1.1.1](releases/release-1.1.1.md) + - [1.1 GA](releases/release-1.1-ga.md) + - [1.1.0-rc.4](releases/release-1.1.0-rc.4.md) + - [1.1.0-rc.3](releases/release-1.1.0-rc.3.md) + - [1.1.0-rc.2](releases/release-1.1.0-rc.2.md) + - [1.1.0-rc.1](releases/release-1.1.0-rc.1.md) + - 
[1.1.0-beta.2](releases/release-1.1.0-beta.2.md) + - [1.1.0-beta.1](releases/release-1.1.0-beta.1.md) + - v1.0 + - [1.0.7](releases/release-1.0.7.md) + - [1.0.6](releases/release-1.0.6.md) + - [1.0.5](releases/release-1.0.5.md) + - [1.0.4](releases/release-1.0.4.md) + - [1.0.3](releases/release-1.0.3.md) + - [1.0.2](releases/release-1.0.2.md) + - [1.0.1](releases/release-1.0.1.md) + - [1.0 GA](releases/release-1.0-ga.md) + - [1.0.0-rc.1](releases/release-1.0.0-rc.1.md) + - [1.0.0-beta.3](releases/release-1.0.0-beta.3.md) + - [1.0.0-beta.2](releases/release-1.0.0-beta.2.md) + - [1.0.0-beta.1-p2](releases/release-1.0.0-beta.1-p2.md) + - [1.0.0-beta.1-p1](releases/release-1.0.0-beta.1-p1.md) + - [1.0.0-beta.1](releases/release-1.0.0-beta.1.md) + - [1.0.0-beta.0](releases/release-1.0.0-beta.0.md) + - v0 + - [0.4.0](releases/release-0.4.0.md) + - [0.3.1](releases/release-0.3.1.md) + - [0.3.0](releases/release-0.3.0.md) + - [0.2.1](releases/release-0.2.1.md) + - [0.2.0](releases/release-0.2.0.md) + - [0.1.0](releases/release-0.1.0.md) diff --git a/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md new file mode 100644 index 00000000..deab93b7 --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/grant-permissions-to-remote-storage.md @@ -0,0 +1,205 @@ +--- +title: 远程存储访问授权 +summary: 介绍如何授权访问远程存储。 +--- + +# 远程存储访问授权 + +本文详细描述了如何授权访问远程存储,以实现备份 TiDB 集群数据到远程存储或从远程存储恢复备份数据到 TiDB 集群。 + +## AWS 账号授权 + +在 AWS 云环境中,不同的类型的 Kubernetes 集群提供了不同的权限授予方式。本文分别介绍以下三种权限授予配置方式。 + +### 通过 AccessKey 和 SecretKey 授权 + +AWS 的客户端支持读取进程环境变量中的 `AWS_ACCESS_KEY_ID` 以及 `AWS_SECRET_ACCESS_KEY` 来获取与之相关联的用户或者角色的权限。 + +创建 `s3-secret` secret,在以下命令中使用 AWS 账号的 AccessKey 和 SecretKey 进行授权。该 secret 存放用于访问 S3 兼容存储的凭证。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic s3-secret 
--from-literal=access_key=xxx --from-literal=secret_key=yyy --namespace=test1 +``` + +### 通过 IAM 绑定 Pod 授权 + +通过将用户的 [IAM](https://aws.amazon.com/cn/iam/) 角色与所运行的 Pod 资源进行绑定,使 Pod 中运行的进程获得角色所拥有的权限,这种授权方式是由 [`kube2iam`](https://github.com/jtblin/kube2iam) 提供。 + +> **注意:** +> +> - 使用该授权模式时,可以参考 [kube2iam 文档](https://github.com/jtblin/kube2iam#usage)在 Kubernetes 集群中创建 kube2iam 环境,并且部署 TiDB Operator 以及 TiDB 集群。 +> - 该模式不适用于 [`hostNetwork`](https://kubernetes.io/docs/concepts/policy/pod-security-policy) 网络模式,请确保参数 `spec.tikv.hostNetwork` 的值为 `false`。 + +1. 创建 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/id_users_create.html)来为账号创建一个 IAM 角色,并且通过 [AWS 官方文档](https://docs.aws.amazon.com/IAM/latest/UserGuide/access_policies_manage-attach-detach.html)为 IAM 角色赋予需要的权限。由于 `Backup` 需要访问 AWS 的 S3 存储,所以这里给 IAM 赋予了 `AmazonS3FullAccess` 的权限。 + + 如果是进行基于 AWS Elastic Block Store (EBS) 快照的备份和恢复,除完整的 S3 权限 `AmazonS3FullAccess` 外,还需要以下权限: + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + +2. 
绑定 IAM 到 TiKV Pod: + + 在使用 BR 备份的过程中,TiKV Pod 和 BR Pod 一样需要对 S3 存储进行读写操作,所以这里需要给 TiKV Pod 打上 annotation 来绑定 IAM 角色。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"annotations":{"iam.amazonaws.com/role":"arn:aws:iam::123456789012:role/user"}}}}' + ``` + + 等到 TiKV Pod 重启后,查看 Pod 是否加上了这个 annotation。 + +> **注意:** +> +> `arn:aws:iam::123456789012:role/user` 为步骤 1 中创建的 IAM 角色。 + +### 通过 IAM 绑定 ServiceAccount 授权 + +通过将用户的 [IAM](https://aws.amazon.com/cn/iam/) 角色与 Kubernetes 中的 [`serviceAccount`](https://kubernetes.io/docs/reference/access-authn-authz/admission-controllers/#serviceaccount) 资源进行绑定, 从而使得使用该 ServiceAccount 账号的 Pod 都拥有该角色所拥有的权限,这种授权方式由 [`EKS Pod Identity Webhook`](https://github.com/aws/amazon-eks-pod-identity-webhook) 服务提供。 + +使用该授权模式时,可以参考 [AWS 官方文档](https://docs.aws.amazon.com/zh_cn/eks/latest/userguide/create-cluster.html)创建 EKS 集群,并且部署 TiDB Operator 以及 TiDB 集群。 + +1. 在集群上为服务帐户启用 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/eks/latest/userguide/enable-iam-roles-for-service-accounts.html)开启所在的 EKS 集群的 IAM 角色授权。 + +2. 创建 IAM 角色: + + 可以参考 [AWS 官方文档](https://docs.aws.amazon.com/eks/latest/userguide/associate-service-account-role.html)创建一个 IAM 角色,为角色赋予 `AmazonS3FullAccess` 的权限,并且编辑角色的 `Trust relationships`,赋予 tidb-backup-manager 使用此 IAM 角色的权限。 + + 如果是进行基于 AWS EBS 快照的备份和恢复,除完整的 S3 权限 `AmazonS3FullAccess` 外,还需要以下权限: + + {{< copyable "shell-regular" >}} + + ```json + { + "Effect": "Allow", + "Action": [ + "ec2:AttachVolume", + "ec2:CreateSnapshot", + "ec2:CreateSnapshots", + "ec2:CreateTags", + "ec2:CreateVolume", + "ec2:DeleteSnapshot", + "ec2:DeleteTags", + "ec2:DeleteVolume", + "ec2:DescribeInstances", + "ec2:DescribeSnapshots", + "ec2:DescribeTags", + "ec2:DescribeVolumes", + "ec2:DetachVolume", + "ebs:ListSnapshotBlocks", + "ebs:ListChangedBlocks" + ], + "Resource": "*" + } + ``` + + 同时编辑角色的 `Trust relationships`,赋予 tidb-controller-manager 使用此 IAM 角色的权限。 + +3. 
绑定 IAM 到 ServiceAccount 资源上: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl annotate sa tidb-backup-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=test1 + ``` + + 如果是进行基于 AWS EBS 快照的备份和恢复,需要绑定 IAM 到 tidb-controller-manager 的 ServiceAccount 上: + + ```shell + kubectl annotate sa tidb-controller-manager eks.amazonaws.com/role-arn=arn:aws:iam::123456789012:role/user --namespace=tidb-admin + ``` + + 重启 TiDB Operator 的 tidb-controller-manager Pod,使配置的 ServiceAccount 生效。 + +4. 将 ServiceAccount 绑定到 TiKV Pod: + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"serviceAccount": "tidb-backup-manager"}}}' + ``` + + 将 `spec.tikv.serviceAccount` 修改为 tidb-backup-manager,等到 TiKV Pod 重启后,查看 Pod 的 `serviceAccountName` 是否有变化。 + +> **注意:** +> +> `arn:aws:iam::123456789012:role/user` 为步骤 2 中创建的 IAM 角色。 + +## GCS 账号授权 + +### 通过服务账号密钥授权 + +创建 `gcs-secret` secret。该 secret 存放用于访问 GCS 的凭证。`google-credentials.json` 文件存放用户从 Google Cloud console 上下载的 service account key。具体操作参考 [Google Cloud 官方文档](https://cloud.google.com/docs/authentication/getting-started)。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic gcs-secret --from-file=credentials=./google-credentials.json -n test1 +``` + +## Azure 账号授权 + +在 Azure 云环境中,不同的类型的 Kubernetes 集群提供了不同的权限授予方式。本文分别介绍以下两种权限授予配置方式。 + +### 通过访问密钥授权 + +Azure 的客户端支持读取进程环境变量中的 `AZURE_STORAGE_ACCOUNT` 以及 `AZURE_STORAGE_KEY` 来获取与之相关联的用户或者角色的权限。 + +创建 `azblob-secret` secret,在以下命令中使用 Azure 账号的访问密钥进行授权。该 secret 存放用于访问 Azure Blob Storage 的凭证。 + +{{< copyable "shell-regular" >}} + +```shell +kubectl create secret generic azblob-secret --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_STORAGE_KEY=yyy --namespace=test1 +``` + +### 通过 Azure AD 授权 + +Azure 的客户端支持读取进程环境变量中的 `AZURE_STORAGE_ACCOUNT`、`AZURE_CLIENT_ID`、`AZURE_TENANT_ID`、`AZURE_CLIENT_SECRET` 来获取与之相关联的用户或者角色的权限。 + +1. 
创建 `azblob-secret-ad` secret,在以下命令中使用 Azure 账号的 AD 进行授权。该 secret 存放用于访问 Azure Blob Storage 的凭证。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl create secret generic azblob-secret-ad --from-literal=AZURE_STORAGE_ACCOUNT=xxx --from-literal=AZURE_CLIENT_ID=yyy --from-literal=AZURE_TENANT_ID=zzz --from-literal=AZURE_CLIENT_SECRET=aaa --namespace=test1 + ``` + +2. 绑定 secret 到 TiKV Pod: + + 在使用 BR 备份的过程中,TiKV Pod 和 BR Pod 一样需要对 Azure Blob Storage 进行读写操作,所以这里需要给 TiKV Pod 绑定 secret。 + + {{< copyable "shell-regular" >}} + + ```shell + kubectl patch tc demo1 -n test1 --type merge -p '{"spec":{"tikv":{"envFrom":[{"secretRef":{"name":"azblob-secret-ad"}}]}}}' + ``` + + 等到 TiKV Pod 重启后,查看 Pod 是否加上了这些环境变量。 diff --git a/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md new file mode 100644 index 00000000..6804a9a2 --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/releases/release-1.5.0.md @@ -0,0 +1,41 @@ +--- +title: TiDB Operator 1.5.0 Release Notes +summary: 了解 TiDB Operator 1.5.0 版本的新功能、优化提升,以及 Bug 修复。 +--- + +# TiDB Operator 1.5.0 Release Notes + +发布日期: 2023 年 8 月 4 日 + +TiDB Operator 版本:1.5.0 + +## 滚动升级改动 + +由于 [#5075](https://github.com/pingcap/tidb-operator/pull/5075) 的改动,如果 TiDB v7.1.0 或以上版本的集群中部署了 TiFlash,升级 TiDB Operator 到 v1.5.0 之后 TiFlash 组件会滚动升级。 + +## 新功能 + +- 新增 BR Federation Manager 组件,支持跨多个 Kubernetes 集群编排 `Backup` 和 `Restore` custom resources (CR) ([#4996](https://github.com/pingcap/tidb-operator/pull/4996), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持使用 `VolumeBackup` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的备份 ([#5013](https://github.com/pingcap/tidb-operator/pull/5013), [@WangLe1321](https://github.com/WangLe1321)) +- 支持使用 `VolumeRestore` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的恢复 ([#5039](https://github.com/pingcap/tidb-operator/pull/5039), 
[@WangLe1321](https://github.com/WangLe1321)) +- 支持使用 `VolumeBackupSchedule` CR 对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的自动备份 ([#5036](https://github.com/pingcap/tidb-operator/pull/5036), [@BornChanger](https://github.com/BornChanger)) +- 当对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS 快照的备份时,支持备份与 `TidbCluster` 相关的 CR 数据 ([#5207](https://github.com/pingcap/tidb-operator/pull/5207), [@WangLe1321](https://github.com/WangLe1321)) + +## 优化提升 + +- 为 DM master 添加 `startUpScriptVersion` 字段,支持设置启动脚本的版本 ([#4971](https://github.com/pingcap/tidb-operator/pull/4971), [@hanlins](https://github.com/hanlins)) +- 为 DmCluster、TidbDashboard、TidbMonitor 以及 TidbNGMonitoring 增加 `spec.preferIPv6` 支持 ([#4977](https://github.com/pingcap/tidb-operator/pull/4977), [@KanShiori](https://github.com/KanShiori)) +- 支持为 TiKV 驱逐 leader 和 PD 转移 leader 设置过期时间 ([#4997](https://github.com/pingcap/tidb-operator/pull/4997), [@Tema](https://github.com/Tema)) +- 支持为 `TidbInitializer` 设置 tolerations ([#5047](https://github.com/pingcap/tidb-operator/pull/5047), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持为 PD 设置启动超时时间 ([#5071](https://github.com/pingcap/tidb-operator/pull/5071), [@oliviachenairbnb](https://github.com/oliviachenairbnb)) +- 当 TiKV 在扩展 PVC 的大小时,不再执行驱逐 leader 操作,避免因磁盘容量不足而造成驱逐卡住 ([#5101](https://github.com/pingcap/tidb-operator/pull/5101), [@csuzhangxc](https://github.com/csuzhangxc)) +- 支持更新 PD、TiKV、TiFlash、TiProxy、DM-Master 与 DM-worker 组件 Service 的 annotation 与 label ([#4973](https://github.com/pingcap/tidb-operator/pull/4973), [@wxiaomou](https://github.com/wxiaomou)) +- 默认启用 volume resize,支持对 PV 的扩容 ([#5167](https://github.com/pingcap/tidb-operator/pull/5167), [@liubog2008](https://github.com/liubog2008)) + +## Bug 修复 + +- 修复升级 TiKV 时由于部分 store 下线而造成 quorum 丢失的问题 ([#4979](https://github.com/pingcap/tidb-operator/pull/4979), [@Tema](https://github.com/Tema)) +- 修复升级 PD 时由于部分 member 下线而造成 quorum 丢失的问题 ([#4995](https://github.com/pingcap/tidb-operator/pull/4995), [@Tema](https://github.com/Tema)) 
+- 修复 TiDB Operator 在未配置任何 Kubernetes 集群级别权限时 panic 的问题 ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- 修复在 `TidbCluster` CR 中设置 `AdditionalVolumeMounts` 时 TiDB Operator 可能 panic 的问题 ([#5058](https://github.com/pingcap/tidb-operator/pull/5058), [@liubog2008](https://github.com/liubog2008)) +- 修复 `TidbDashboard` CR 在使用自定义的 image registry 时解析 `baseImage` 错误的问题 ([#5014](https://github.com/pingcap/tidb-operator/pull/5014), [@linkinghack](https://github.com/linkinghack)) diff --git a/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md new file mode 100644 index 00000000..fb34f208 --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/tidb-operator-overview.md @@ -0,0 +1,71 @@ +--- +title: TiDB Operator 简介 +summary: 介绍 TiDB Operator 的整体架构及使用方式。 +aliases: ['/docs-cn/tidb-in-kubernetes/dev/tidb-operator-overview/'] +--- + +# TiDB Operator 简介 + +[TiDB Operator](https://github.com/pingcap/tidb-operator) 是 Kubernetes 上的 TiDB 集群自动运维系统,提供包括部署、升级、扩缩容、备份恢复、配置变更的 TiDB 全生命周期管理。借助 TiDB Operator,TiDB 可以无缝运行在公有云或自托管的 Kubernetes 集群上。 + +TiDB Operator 与适用的 TiDB 版本的对应关系如下: + +| TiDB 版本 | 适用的 TiDB Operator 版本 | +|:---|:---| +| dev | dev | +| TiDB >= 7.1 | 1.5(推荐),1.4 | +| 6.5 <= TiDB < 7.1 | 1.5, 1.4(推荐),1.3 | +| 5.4 <= TiDB < 6.5 | 1.4, 1.3(推荐) | +| 5.1 <= TiDB < 5.4 | 1.4,1.3(推荐),1.2 | +| 3.0 <= TiDB < 5.1 | 1.4,1.3(推荐),1.2,1.1 | +| 2.1 <= TiDB < v3.0| 1.0(停止维护) | + +## 使用 TiDB Operator 管理 TiDB 集群 + +TiDB Operator 提供了多种方式来部署 Kubernetes 上的 TiDB 集群: + ++ 测试环境: + + - [kind](get-started.md#方法一使用-kind-创建-kubernetes-集群) + - [Minikube](get-started.md#方法二使用-minikube-创建-kubernetes-集群) + - [Google Cloud 
Shell](https://console.cloud.google.com/cloudshell/open?cloudshell_git_repo=https://github.com/pingcap/docs-tidb-operator&cloudshell_tutorial=zh/deploy-tidb-from-kubernetes-gke.md) + ++ 生产环境: + + - 在公有云上部署生产可用的 TiDB 集群并进行后续的运维管理; + + - [在 AWS EKS 上部署 TiDB 集群](deploy-on-aws-eks.md) + - [在 Google Cloud GKE 上部署 TiDB 集群](deploy-on-gcp-gke.md) + - [在 Azure AKS 上部署 TiDB 集群](deploy-on-azure-aks.md) + - [在阿里云 ACK 上部署 TiDB 集群](deploy-on-alibaba-cloud.md) + + - 在自托管的 Kubernetes 集群中部署 TiDB 集群: + + 首先按照[部署 TiDB Operator](deploy-tidb-operator.md)在集群中安装 TiDB Operator,再根据[在标准 Kubernetes 集群上部署 TiDB 集群](deploy-on-general-kubernetes.md)来部署你的 TiDB 集群。对于生产级 TiDB 集群,你还需要参考 [TiDB 集群环境要求](prerequisites.md)调整 Kubernetes 集群配置并根据[本地 PV 配置](configure-storage-class.md#本地-pv-配置)为你的 Kubernetes 集群配置本地 PV,以满足 TiKV 的低延迟本地存储需求。 + +在任何环境上部署前,都可以参考 [TiDB 集群配置](configure-a-tidb-cluster.md)来自定义 TiDB 配置。 + +部署完成后,你可以参考下面的文档进行 Kubernetes 上 TiDB 集群的使用和运维: + ++ [部署 TiDB 集群](deploy-on-general-kubernetes.md) ++ [访问 TiDB 集群](access-tidb.md) ++ [TiDB 集群扩缩容](scale-a-tidb-cluster.md) ++ [TiDB 集群升级](upgrade-a-tidb-cluster.md) ++ [TiDB 集群配置变更](configure-a-tidb-cluster.md) ++ [TiDB 集群备份与恢复](backup-restore-overview.md) ++ [配置 TiDB 集群故障自动转移](use-auto-failover.md) ++ [监控 TiDB 集群](monitor-a-tidb-cluster.md) ++ [查看 TiDB 日志](view-logs.md) ++ [维护 TiDB 所在的 Kubernetes 节点](maintain-a-kubernetes-node.md) + +当集群出现问题需要进行诊断时,你可以: + ++ 查阅 [Kubernetes 上的 TiDB FAQ](faq.md) 寻找是否存在现成的解决办法; ++ 参考 [Kubernetes 上的 TiDB 故障诊断](tips.md)解决故障。 + +Kubernetes 上的 TiDB 提供了专用的命令行工具 `tkctl` 用于集群管理和辅助诊断,同时,在 Kubernetes 上,TiDB 的部分生态工具的使用方法也有所不同,你可以: + ++ 参考 [`tkctl` 使用指南](use-tkctl.md) 来使用 `tkctl`; ++ 参考 [Kubernetes 上的 TiDB 相关工具使用指南](tidb-toolkit.md)来了解 TiDB 生态工具在 Kubernetes 上的使用方法。 + +最后,当 TiDB Operator 发布新版本时,你可以参考[升级 TiDB Operator](upgrade-tidb-operator.md) 进行版本更新。 diff --git a/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md 
b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md new file mode 100644 index 00000000..595d35c8 --- /dev/null +++ b/test/sync_pr_operator/data/markdown-pages/zh/tidb-in-kubernetes/master/whats-new-in-v1.5.md @@ -0,0 +1,31 @@ +--- +title: TiDB Operator v1.5 新特性 +summary: 了解 TiDB Operator 1.5.0 版本引入的新特性。 +--- + +# TiDB Operator v1.5 新特性 + +TiDB Operator v1.5 引入了以下关键特性,从扩展性、易用性等方面帮助你更轻松地管理 TiDB 集群及其周边工具。 + +## 兼容性改动 + +如需使用在 [#4959](https://github.com/pingcap/tidb-operator/pull/4959) 中引入的 `PreferDualStack` 特性(通过 `spec.preferIPv6: true` 启用),Kubernetes 版本需要大于等于 v1.20。 + +## 滚动升级改动 + +由于 [#5075](https://github.com/pingcap/tidb-operator/pull/5075) 的改动,如果 TiDB v7.1.0 或以上版本的集群中部署了 TiFlash,升级 TiDB Operator 到 v1.5.0 之后 TiFlash 组件会滚动升级。 + +## 扩展性 + +- 支持通过 `bootstrapSQLConfigMapName` 字段指定 TiDB 首次启动时所执行的初始 SQL 文件。 +- 支持通过配置 `spec.preferIPv6: true` 为所有组件的 Service 的 `ipFamilyPolicy` 配置 `PreferDualStack`。 +- 支持使用 [Advanced StatefulSet](advanced-statefulset.md) 管理 TiCDC 和 TiProxy。 +- 新增 BR Federation Manager 组件,支持对跨多个 Kubernetes 部署的 TiDB 集群进行基于 EBS snapshot 的备份恢复。 + +## 易用性 + +- 支持通过为 PD Pod 加上 `tidb.pingcap.com/pd-transfer-leader` annotation 来优雅重启 PD Pod。 +- 支持通过为 TiDB Pod 加上 `tidb.pingcap.com/tidb-graceful-shutdown` annotation 来优雅重启 TiDB Pod。 +- 允许用户自定义策略来重启失败的备份任务,以提高备份的稳定性。 +- 添加与 reconciler 和 worker queue 相关的监控指标以提高可观测性。 +- 添加统计协调流程失败计数的监控指标以提高可观测性。 diff --git a/test_config.toml b/test_config.toml index 1dd34531..1ec0247c 100644 --- a/test_config.toml +++ b/test_config.toml @@ -8,3 +8,32 @@ test_target = "sync_scaffold.sh" name = "Sync scaffold from a commit" args = "265874160aec258f9c725b0e940bc803ca558bda" directory = "test/sync_scaffold/" + +[sync_pr] + +diff_command = "diff -qrs data actual --exclude temp --exclude '*.log' --exclude sync_pr.sh" +test_target = "sync_pr.sh" + +[[sync_pr.test_cases]] + +name = "Sync markdown-pages from a TiDB Cloud PR" +args = "preview-cloud/pingcap/docs/10098" +directory = "test/sync_pr_cloud/" + 
+[[sync_pr.test_cases]] + +name = "Sync markdown-pages from a docs PR" +args = "preview/pingcap/docs/12929" +directory = "test/sync_pr_docs/" + +[[sync_pr.test_cases]] + +name = "Sync markdown-pages from a docs-cn PR" +args = "preview/pingcap/docs-cn/14523" +directory = "test/sync_pr_docs_cn/" + +[[sync_pr.test_cases]] + +name = "Sync markdown-pages from a TiDB Operator PR" +args = "preview-operator/pingcap/docs-tidb-operator/2397" +directory = "test/sync_pr_operator/"