diff --git a/docs/en/connector-v2/sink/Mysql.md b/docs/en/connector-v2/sink/Mysql.md
index 6c01c35ee8c..f453a60c4e9 100644
--- a/docs/en/connector-v2/sink/Mysql.md
+++ b/docs/en/connector-v2/sink/Mysql.md
@@ -122,7 +122,7 @@ transform {
 
 sink {
     jdbc {
-        url = "jdbc:mysql://localhost:3306/test"
+        url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
         user = "root"
         password = "123456"
@@ -140,7 +140,7 @@ sink {
 ```
 sink {
     jdbc {
-        url = "jdbc:mysql://localhost:3306/test"
+        url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
         user = "root"
         password = "123456"
@@ -159,7 +159,7 @@ sink {
 ```
 sink {
     jdbc {
-        url = "jdbc:mysql://localhost:3306/test"
+        url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
 
         max_retries = 0
@@ -181,7 +181,7 @@ sink {
 ```
 sink {
     jdbc {
-        url = "jdbc:mysql://localhost:3306/test"
+        url = "jdbc:mysql://localhost:3306/test?useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
         user = "root"
         password = "123456"
diff --git a/docs/en/connector-v2/source/Clickhouse.md b/docs/en/connector-v2/source/Clickhouse.md
index 7596bf72a8f..d70a8f0e33f 100644
--- a/docs/en/connector-v2/source/Clickhouse.md
+++ b/docs/en/connector-v2/source/Clickhouse.md
@@ -66,7 +66,7 @@ The following example demonstrates how to create a data synchronization job that
 ```bash
 # Set the basic configuration of the task to be performed
 env {
-  execution.parallelism = 1
+  execution.parallelism = 10
   job.mode = "BATCH"
 }
 
diff --git a/docs/en/connector-v2/source/Jdbc.md b/docs/en/connector-v2/source/Jdbc.md
index d82df87a02e..585c2bc0024 100644
--- a/docs/en/connector-v2/source/Jdbc.md
+++ b/docs/en/connector-v2/source/Jdbc.md
@@ -145,15 +145,25 @@ Jdbc {
 parallel:
 
 ```
-Jdbc {
-    url = "jdbc:mysql://localhost/test?serverTimezone=GMT%2b8"
-    driver = "com.mysql.cj.jdbc.Driver"
-    connection_check_timeout_sec = 100
-    user = "root"
-    password = "123456"
-    query = "select * from type_bin"
-    partition_column = "id"
-    partition_num = 10
+env {
+    execution.parallelism = 10
+    job.mode = "BATCH"
+}
+source {
+    Jdbc {
+        url = "jdbc:mysql://localhost/test?serverTimezone=GMT%2b8"
+        driver = "com.mysql.cj.jdbc.Driver"
+        connection_check_timeout_sec = 100
+        user = "root"
+        password = "123456"
+        query = "select * from type_bin"
+        partition_column = "id"
+        partition_num = 10
+    }
+}
+
+sink {
+    Console {}
 }
 ```
 
diff --git a/docs/en/connector-v2/source/MongoDB.md b/docs/en/connector-v2/source/MongoDB.md
index 137fb205b8c..d63d303fa24 100644
--- a/docs/en/connector-v2/source/MongoDB.md
+++ b/docs/en/connector-v2/source/MongoDB.md
@@ -283,6 +283,10 @@ By utilizing `flat.sync-string`, only one field attribute value can be set, and
 This operation will perform a string mapping on a single MongoDB data entry.
 
 ```bash
+env {
+  execution.parallelism = 10
+  job.mode = "BATCH"
+}
 source {
   MongoDB {
     uri = "mongodb://user:password@127.0.0.1:27017"
@@ -296,6 +300,9 @@ source {
     }
   }
 }
+sink {
+  Console {}
+}
 ```
 
 Use the data samples synchronized with modified parameters, such as the following:
diff --git a/docs/en/connector-v2/source/Mysql.md b/docs/en/connector-v2/source/Mysql.md
index 001ef1463da..bdac5c0aec6 100644
--- a/docs/en/connector-v2/source/Mysql.md
+++ b/docs/en/connector-v2/source/Mysql.md
@@ -94,7 +94,7 @@ env {
 }
 source{
     Jdbc {
-        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8"
+        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
         connection_check_timeout_sec = 100
         user = "root"
@@ -118,9 +118,13 @@ sink {
 > Read your query table in parallel with the shard field you configured and the shard data You can do this if you want to read the whole table
 
 ```
+env {
+    execution.parallelism = 10
+    job.mode = "BATCH"
+}
 source {
     Jdbc {
-        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8"
+        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
         connection_check_timeout_sec = 100
         user = "root"
@@ -133,6 +137,9 @@ source {
         partition_num = 10
     }
 }
+sink {
+    Console {}
+}
 ```
 
 ### Parallel Boundary:
@@ -142,7 +149,7 @@ source {
 ```
 source {
     Jdbc {
-        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8"
+        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
         driver = "com.mysql.cj.jdbc.Driver"
         connection_check_timeout_sec = 100
         user = "root"
diff --git a/docs/en/connector-v2/source/OceanBase.md b/docs/en/connector-v2/source/OceanBase.md
index bd035793eee..434e25284dd 100644
--- a/docs/en/connector-v2/source/OceanBase.md
+++ b/docs/en/connector-v2/source/OceanBase.md
@@ -127,6 +127,10 @@ sink {
 > Read your query table in parallel with the shard field you configured and the shard data.
 You can do this if you want to read the whole table
 ```
+env {
+    execution.parallelism = 10
+    job.mode = "BATCH"
+}
 source {
     Jdbc {
         driver = "com.oceanbase.jdbc.Driver"
@@ -141,6 +145,9 @@ source {
         partition_num = 10
     }
 }
+sink {
+    Console {}
+}
 ```
 
 ### Parallel Boundary:
diff --git a/docs/en/connector-v2/source/Oracle.md b/docs/en/connector-v2/source/Oracle.md
index 385d55ca9e5..f191cda9d99 100644
--- a/docs/en/connector-v2/source/Oracle.md
+++ b/docs/en/connector-v2/source/Oracle.md
@@ -111,6 +111,10 @@ sink {
 > Read your query table in parallel with the shard field you configured and the shard data You can do this if you want to read the whole table
 
 ```
+env {
+    execution.parallelism = 10
+    job.mode = "BATCH"
+}
 source {
     Jdbc {
         url = "jdbc:oracle:thin:@datasource01:1523:xe"
@@ -126,6 +130,9 @@ source {
         partition_num = 10
     }
 }
+sink {
+    Console {}
+}
 ```
 
 ### Parallel Boundary:
diff --git a/docs/en/connector-v2/source/PostgreSQL.md b/docs/en/connector-v2/source/PostgreSQL.md
index 50839780726..63ddbc25ecf 100644
--- a/docs/en/connector-v2/source/PostgreSQL.md
+++ b/docs/en/connector-v2/source/PostgreSQL.md
@@ -120,6 +120,10 @@ sink {
 > Read your query table in parallel with the shard field you configured and the shard data You can do this if you want to read the whole table
 
 ```
+env {
+    execution.parallelism = 10
+    job.mode = "BATCH"
+}
 source{
     jdbc{
         url = "jdbc:postgresql://localhost:5432/test"
@@ -131,6 +135,9 @@ source{
         partition_num = 5
     }
 }
+sink {
+    Console {}
+}
 ```
 
 ### Parallel Boundary:
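
The parallel-read pattern these doc examples converge on has the same shape everywhere: an `env` block that raises `execution.parallelism`, a JDBC `source` with a shard column, and a `sink`. Assembled into one runnable job file it looks roughly like the sketch below, which only reuses values already shown in the examples above and assumes a local MySQL `test` database whose `type_bin` table has a numeric `id` column:

```
# env: run as a batch job with 10 parallel readers
env {
    execution.parallelism = 10
    job.mode = "BATCH"
}

# source: the query is split into partition_num shards on partition_column,
# so each parallel reader scans one slice of the id range
source {
    Jdbc {
        url = "jdbc:mysql://localhost:3306/test?serverTimezone=GMT%2b8&useUnicode=true&characterEncoding=UTF-8&rewriteBatchedStatements=true"
        driver = "com.mysql.cj.jdbc.Driver"
        connection_check_timeout_sec = 100
        user = "root"
        password = "123456"
        query = "select * from type_bin"
        partition_column = "id"
        partition_num = 10
    }
}

# sink: print the rows read by each parallel reader
sink {
    Console {}
}
```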