From 9ef06545460bd1018fbf9c2e5d64e6e7e6705036 Mon Sep 17 00:00:00 2001 From: Grace Cai Date: Fri, 7 Mar 2025 17:48:31 +0800 Subject: [PATCH] This is an automated cherry-pick of #20462 Signed-off-by: ti-chi-bot --- benchmark/benchmark-sysbench-v5-vs-v4.md | 223 ------------------ .../benchmark-sysbench-v5.1.0-vs-v5.0.2.md | 186 --------------- .../benchmark-sysbench-v5.2.0-vs-v5.1.1.md | 186 --------------- .../benchmark-sysbench-v5.3.0-vs-v5.2.2.md | 203 ---------------- .../benchmark-sysbench-v5.4.0-vs-v5.3.0.md | 203 ---------------- .../benchmark-sysbench-v6.0.0-vs-v5.4.0.md | 199 ---------------- .../benchmark-sysbench-v6.1.0-vs-v6.0.0.md | 199 ---------------- .../benchmark-sysbench-v6.2.0-vs-v6.1.0.md | 199 ---------------- ...v5.0-performance-benchmarking-with-tpcc.md | 149 ------------ ...v5.1-performance-benchmarking-with-tpcc.md | 93 -------- ...v5.2-performance-benchmarking-with-tpcc.md | 93 -------- ...v5.3-performance-benchmarking-with-tpcc.md | 129 ---------- ...v5.4-performance-benchmarking-with-tpcc.md | 129 ---------- ...v5.4-performance-benchmarking-with-tpch.md | 128 ---------- ...v6.0-performance-benchmarking-with-tpcc.md | 125 ---------- ...v6.0-performance-benchmarking-with-tpch.md | 8 - ...v6.1-performance-benchmarking-with-tpcc.md | 124 ---------- ...v6.1-performance-benchmarking-with-tpch.md | 8 - ...v6.2-performance-benchmarking-with-tpcc.md | 124 ---------- ...v6.2-performance-benchmarking-with-tpch.md | 8 - faq/manage-cluster-faq.md | 25 +- .../information-schema-inspection-result.md | 1 - tidb-troubleshooting-map.md | 8 +- tikv-control.md | 10 +- 24 files changed, 16 insertions(+), 2744 deletions(-) delete mode 100644 benchmark/benchmark-sysbench-v5-vs-v4.md delete mode 100644 benchmark/benchmark-sysbench-v5.1.0-vs-v5.0.2.md delete mode 100644 benchmark/benchmark-sysbench-v5.2.0-vs-v5.1.1.md delete mode 100644 benchmark/benchmark-sysbench-v5.3.0-vs-v5.2.2.md delete mode 100644 benchmark/benchmark-sysbench-v5.4.0-vs-v5.3.0.md delete mode 100644 benchmark/benchmark-sysbench-v6.0.0-vs-v5.4.0.md delete mode 100644 benchmark/benchmark-sysbench-v6.1.0-vs-v6.0.0.md delete mode 100644 benchmark/benchmark-sysbench-v6.2.0-vs-v6.1.0.md delete mode 100644 benchmark/v5.0-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v5.1-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v5.2-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v5.3-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v5.4-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v5.4-performance-benchmarking-with-tpch.md delete mode 100644 benchmark/v6.0-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v6.0-performance-benchmarking-with-tpch.md delete mode 100644 benchmark/v6.1-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v6.1-performance-benchmarking-with-tpch.md delete mode 100644 benchmark/v6.2-performance-benchmarking-with-tpcc.md delete mode 100644 benchmark/v6.2-performance-benchmarking-with-tpch.md diff --git a/benchmark/benchmark-sysbench-v5-vs-v4.md b/benchmark/benchmark-sysbench-v5-vs-v4.md deleted file mode 100644 index 87900a3a21222..0000000000000 --- a/benchmark/benchmark-sysbench-v5-vs-v4.md +++ /dev/null @@ -1,223 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v5.0 vs. v4.0 -summary: TiDB v5.0 outperforms v4.0 in Sysbench performance tests. Point Select performance improved by 2.7%, Update Non-index by 81%, Update Index by 28%, and Read Write by 9%. 
The test aimed to compare performance in the OLTP scenario using AWS EC2. Test results were presented in tables and graphs. ---- - -# TiDB Sysbench Performance Test Report -- v5.0 vs. v4.0 - -## Test purpose - -This test aims at comparing the Sysbench performance of TiDB v5.0 and TiDB v4.0 in the Online Transactional Processing (OLTP) scenario. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | 4.0 and 5.0 | -| TiDB | 4.0 and 5.0 | -| TiKV | 4.0 and 5.0 | -| Sysbench | 1.0.20 | - -### Parameter configuration - -#### TiDB v4.0 configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV v4.0 configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 3 -raftdb.max-background-jobs: 3 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.unified.min-thread-count: 5 -readpool.unified.max-thread-count: 20 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -``` - -#### TiDB v5.0 configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV v5.0 configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -raftdb.max-background-jobs: 4 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.unified.min-thread-count: 5 -readpool.unified.max-thread-count: 20 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -server.enable-request-batch: false -``` - -#### TiDB v4.0 global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -``` - -#### TiDB v5.0 global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; - -``` - -## Test plan - -1. Deploy TiDB v5.0 and v4.0 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via AWS NLB. In each type of test, the warm-up takes 1 minute and the test takes 5 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. 
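Step 3 of the plan above runs `ANALYZE TABLE` on every imported table, but the report does not show how that was scripted. The following is a minimal sketch only, assuming the default Sysbench table names (`sbtest1` through `sbtest16`), the standard `mysql` client, and the same `$aws_nlb_host`/`$aws_nlb_port` placeholders used in the Sysbench commands below:

```bash
#!/bin/bash
# Refresh optimizer statistics on the 16 Sysbench tables (step 3 of the test plan).
# The connection placeholders are the same ones used by the Sysbench commands.
for i in $(seq 1 16); do
    mysql -h "$aws_nlb_host" -P "$aws_nlb_port" -u root -ppassword \
          -e "ANALYZE TABLE sbtest.sbtest${i};"
done
```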
- -### Prepare test data - -Execute the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Execute the following command to perform the test. - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=300 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v4.0 QPS | v4.0 95% latency (ms) | v5.0 QPS | v5.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -| 150 | 159451.19 | 1.32 | 177876.25 | 1.23 | 11.56% | -| 300 | 244790.38 | 1.96 | 252675.03 | 1.82 | 3.22% | -| 600 | 322929.05 | 3.75 | 331956.84 | 3.36 | 2.80% | -| 900 | 364840.05 | 5.67 | 365655.04 | 5.09 | 0.22% | -| 1200 | 376529.18 | 7.98 | 366507.47 | 7.04 | -2.66% | -| 1500 | 368390.52 | 10.84 | 372476.35 | 8.90 | 1.11% | - -Compared with v4.0, the Point Select performance of TiDB v5.0 has increased by 2.7%. - -![Point Select](/media/sysbench_v5vsv4_point_select.png) - -### Update Non-index performance - -| Threads | v4.0 QPS | v4.0 95% latency (ms) | v5.0 QPS | v5.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -| 150 | 17243.78 | 11.04 | 30866.23 | 6.91 | 79.00% | -| 300 | 25397.06 | 15.83 | 45915.39 | 9.73 | 80.79% | -| 600 | 33388.08 | 25.28 | 60098.52 | 16.41 | 80.00% | -| 900 | 38291.75 | 36.89 | 70317.41 | 21.89 | 83.64% | -| 1200 | 41003.46 | 55.82 | 76376.22 | 28.67 | 86.27% | -| 1500 | 44702.84 | 62.19 | 80234.58 | 34.95 | 79.48% | - -Compared with v4.0, the Update Non-index performance of TiDB v5.0 has increased by 81%. - -![Update Non-index](/media/sysbench_v5vsv4_update_non_index.png) - -### Update Index performance - -| Threads | v4.0 QPS | v4.0 95% latency (ms) | v5.0 QPS | v5.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -| 150 | 11736.21 | 17.01 | 15631.34 | 17.01 | 33.19% | -| 300 | 15435.95 | 28.67 | 19957.06 | 22.69 | 29.29% | -| 600 | 18983.21 | 49.21 | 23218.14 | 41.85 | 22.31% | -| 900 | 20855.29 | 74.46 | 26226.76 | 53.85 | 25.76% | -| 1200 | 21887.64 | 102.97 | 28505.41 | 69.29 | 30.24% | -| 1500 | 23621.15 | 110.66 | 30341.06 | 82.96 | 28.45% | - -Compared with v4.0, the Update Index performance of TiDB v5.0 has increased by 28%. - -![Update Index](/media/sysbench_v5vsv4_update_index.png) - -### Read Write performance - -| Threads | v4.0 QPS | v4.0 95% latency (ms) | v5.0 QPS | v5.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -| 150 | 59979.91 | 61.08 | 66098.57 | 55.82 | 10.20% | -| 300 | 77118.32 | 102.97 | 84639.48 | 90.78 | 9.75% | -| 600 | 90619.52 | 183.21 | 101477.46 | 167.44 | 11.98% | -| 900 | 97085.57 | 267.41 | 109463.46 | 240.02 | 12.75% | -| 1200 | 106521.61 | 331.91 | 115416.05 | 320.17 | 8.35% | -| 1500 | 116278.96 | 363.18 | 118807.5 | 411.96 | 2.17% | - -Compared with v4.0, the read-write performance of TiDB v5.0 has increased by 9%. 
- -![Read Write](/media/sysbench_v5vsv4_read_write.png) diff --git a/benchmark/benchmark-sysbench-v5.1.0-vs-v5.0.2.md b/benchmark/benchmark-sysbench-v5.1.0-vs-v5.0.2.md deleted file mode 100644 index a9d398939913f..0000000000000 --- a/benchmark/benchmark-sysbench-v5.1.0-vs-v5.0.2.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v5.1.0 vs. v5.0.2 -summary: TiDB v5.1.0 shows a 19.4% improvement in Point Select performance compared to v5.0.2. However, the Read Write and Update Index performance is slightly reduced in v5.1.0. The test was conducted on AWS EC2 using Sysbench with specific hardware and software configurations. The test plan involved deploying, importing data, and performing stress tests. Overall, v5.1.0 demonstrates improved Point Select performance but reduced performance in other areas. ---- - -# TiDB Sysbench Performance Test Report -- v5.1.0 vs. v5.0.2 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v5.1.0 and TiDB v5.0.2 in the Online Transactional Processing (OLTP) scenario. The results show that compared with v5.0.2, the Point Select performance of v5.1.0 is improved by 19.4%, and the performance of the Read Write and Update Index is slightly reduced. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.0.2 and v5.1.0 | -| TiDB | v5.0.2 and v5.1.0 | -| TiKV | v5.0.2 and v5.1.0 | -| Sysbench | 1.0.20 | - -### Parameter configuration - -TiDB v5.1.0 and TiDB v5.0.2 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -raftdb.max-background-jobs: 4 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.unified.min-thread-count: 5 -readpool.unified.max-thread-count: 20 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -server.enable-request-batch: false -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -## Test plan - -1. Deploy TiDB v5.1.0 and v5.0.2 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. The test takes 5 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. 
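Step 5 above runs four workloads at six concurrency levels each. One possible way to drive those runs is a thin wrapper around the `sysbench ... run` command shown in the "Perform the test" section that follows; the loop below is only a sketch under that assumption (the Sysbench built-in `oltp_*` scripts correspond to the test names in step 5, and the log file names are arbitrary):

```bash
#!/bin/bash
# Sketch of a driver for step 5: run each workload at every concurrency level,
# reusing the "sysbench ... run" invocation shown in "Perform the test" below.
for testname in oltp_point_select oltp_read_write oltp_update_index oltp_update_non_index; do
    for threads in 150 300 600 900 1200 1500; do
        sysbench "$testname" \
            --threads="$threads" \
            --time=300 \
            --report-interval=1 \
            --rand-type=uniform \
            --db-driver=mysql \
            --mysql-db=sbtest \
            --mysql-host="$aws_nlb_host" \
            --mysql-port="$aws_nlb_port" \
            run --tables=16 --table-size=10000000 \
            | tee "sysbench_${testname}_${threads}.log"   # keep the raw output for later analysis
    done
done
```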
- -### Prepare test data - -Execute the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Execute the following command to perform the test: - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=300 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v5.0.2 QPS | v5.0.2 95% latency (ms) | v5.1.0 QPS | v5.1.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -|150|137732.27|1.86|158861.67|2|15.34%| -|300|201420.58|2.91|238038.44|2.71|18.18%| -|600|303631.52|3.49|428573.21|2.07|41.15%| -|900|383628.13|3.55|464863.22|3.89|21.18%| -|1200|391451.54|5.28|413656.74|13.46|5.67%| -|1500|410276.93|7.43|471418.78|10.65|14.90%| - -Compared with v5.0.2, the Point Select performance of v5.1.0 is improved by 19.4%. - -![Point Select](/media/sysbench_v510vsv502_point_select.png) - -### Update Non-index performance - -| Threads | v5.0.2 QPS | v5.0.2 95% latency (ms) | v5.1.0 QPS | v5.1.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -|150|29248.2|7.17|29362.7|8.13|0.39%| -|300|40316.09|12.52|39651.52|13.7|-1.65%| -|600|51011.11|22.28|47047.9|27.66|-7.77%| -|900|58814.16|27.66|59331.84|28.67|0.88%| -|1200|65286.52|32.53|67745.39|31.37|3.77%| -|1500|68300.86|39.65|67899.17|44.17|-0.59%| - -Compared with v5.0.2, the Update Non-index performance of v5.1.0 is reduced by 0.8%. - -![Update Non-index](/media/sysbench_v510vsv502_update_non_index.png) - -### Update Index performance - -| Threads | v5.0.2 QPS | v5.0.2 95% latency (ms) | v5.1.0 QPS | v5.1.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -|150|15066.54|14.73|14829.31|14.73|-1.57%| -|300|18535.92|24.83|17401.01|29.72|-6.12%| -|600|22862.73|41.1|21923.78|44.98|-4.11%| -|900|25286.74|57.87|24916.76|58.92|-1.46%| -|1200|27566.18|70.55|27800.62|69.29|0.85%| -|1500|28184.76|92.42|28679.72|86|1.76%| - -Compared with v5.0.2, the Update Index performance of v5.1.0 is reduced by 1.8%. - -![Update Index](/media/sysbench_v510vsv502_update_index.png) - -### Read Write performance - -| Threads | v5.0.2 QPS | v5.0.2 95% latency (ms) | v5.1.0 QPS | v5.1.0 95% latency (ms) | QPS improvement | -|:----------|:----------|:----------|:----------|:----------|:----------| -|150|66415.33|56.84|66591.49|57.87|0.27%| -|300|82488.39|97.55|81226.41|101.13|-1.53%| -|600|99195.36|173.58|97357.86|179.94|-1.85%| -|900|107382.76|253.35|101665.95|267.41|-5.32%| -|1200|112389.23|337.94|107426.41|350.33|-4.42%| -|1500|113548.73|450.77|109805.26|442.73|-3.30%| - -Compared with v5.0.2, the Read Write performance of v5.1.0 is reduced by 2.7%. 
- -![Read Write](/media/sysbench_v510vsv502_read_write.png) diff --git a/benchmark/benchmark-sysbench-v5.2.0-vs-v5.1.1.md b/benchmark/benchmark-sysbench-v5.2.0-vs-v5.1.1.md deleted file mode 100644 index ae539a95e551b..0000000000000 --- a/benchmark/benchmark-sysbench-v5.2.0-vs-v5.1.1.md +++ /dev/null @@ -1,186 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v5.2.0 vs. v5.1.1 -summary: TiDB v5.2.0 shows an 11.03% improvement in Point Select performance compared to v5.1.1. However, other scenarios show a slight reduction in performance. The hardware and software configurations, test plan, and results are detailed in the report. ---- - -# TiDB Sysbench Performance Test Report -- v5.2.0 vs. v5.1.1 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v5.2.0 and TiDB v5.1.1 in the Online Transactional Processing (OLTP) scenario. The results show that compared with v5.1.1, the Point Select performance of v5.2.0 is improved by 11.03%, and the performance of other scenarios is slightly reduced. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.1.1 and v5.2.0 | -| TiDB | v5.1.1 and v5.2.0 | -| TiKV | v5.1.1 and v5.2.0 | -| Sysbench | 1.1.0-ead2689 | - -### Parameter configuration - -TiDB v5.2.0 and TiDB v5.1.1 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -raftdb.max-background-jobs: 4 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.unified.min-thread-count: 5 -readpool.unified.max-thread-count: 20 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -server.enable-request-batch: false -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -## Test plan - -1. Deploy TiDB v5.2.0 and v5.1.1 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. The test takes 5 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. 
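Step 6 above resets the cluster to the backed-up state before the next workload, but the exact commands are not part of the report. The fragment below is only a sketch of that step, assuming the cluster was deployed with TiUP under a hypothetical name `bench-cluster`; the data restore itself depends on how the backup in step 4 was taken, so it is left as a placeholder.

```bash
#!/bin/bash
# Sketch of step 6: stop the cluster, put the backed-up data back, and restart.
CLUSTER_NAME=bench-cluster          # hypothetical name chosen at "tiup cluster deploy" time

tiup cluster stop "$CLUSTER_NAME"   # stop all PD, TiKV, and TiDB instances

# Restore the data saved in step 4 here (for example, by copying the backed-up
# TiKV data directories back into place); the method depends on the backup tool used.

tiup cluster start "$CLUSTER_NAME"  # bring the cluster back up for the next workload
```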
-
-### Prepare test data
-
-Execute the following command to prepare the test data:
-
-{{< copyable "shell-regular" >}}
-
-```bash
-sysbench oltp_common \
- --threads=16 \
- --rand-type=uniform \
- --db-driver=mysql \
- --mysql-db=sbtest \
- --mysql-host=$aws_nlb_host \
- --mysql-port=$aws_nlb_port \
- --mysql-user=root \
- --mysql-password=password \
- prepare --tables=16 --table-size=10000000
-```
-
-### Perform the test
-
-Execute the following command to perform the test:
-
-{{< copyable "shell-regular" >}}
-
-```bash
-sysbench $testname \
- --threads=$threads \
- --time=300 \
- --report-interval=1 \
- --rand-type=uniform \
- --db-driver=mysql \
- --mysql-db=sbtest \
- --mysql-host=$aws_nlb_host \
- --mysql-port=$aws_nlb_port \
- run --tables=16 --table-size=10000000
-```
-
-## Test results
-
-### Point Select performance
-
-| Threads | v5.1.1 QPS | v5.1.1 95% latency (ms) | v5.2.0 QPS | v5.2.0 95% latency (ms) | QPS improvement |
-|:----------|:----------|:----------|:----------|:----------|:----------|
-|150|143014.13|2.35|174402.5|1.23|21.95%|
-|300|199133.06|3.68|272018|1.64|36.60%|
-|600|389391.65|2.18|393536.4|2.11|1.06%|
-|900|468338.82|2.97|447981.98|3.3|-4.35%|
-|1200|448348.52|5.18|468241.29|4.65|4.44%|
-|1500|454376.79|7.04|483888.42|6.09|6.49%|
-
-Compared with v5.1.1, the Point Select performance of v5.2.0 is improved by 11.03%.
-
-![Point Select](/media/sysbench_v511vsv520_point_select.png)
-
-### Update Non-index performance
-
-| Threads | v5.1.1 QPS | v5.1.1 95% latency (ms) | v5.2.0 QPS | v5.2.0 95% latency (ms) | QPS improvement |
-|:----------|:----------|:----------|:----------|:----------|:----------|
-|150|31198.68|6.43|30714.73|6.09|-1.55%|
-|300|43577.15|10.46|42997.92|9.73|-1.33%|
-|600|57230.18|17.32|56168.81|16.71|-1.85%|
-|900|65325.11|23.1|64098.04|22.69|-1.88%|
-|1200|71528.26|28.67|69908.15|28.67|-2.26%|
-|1500|76652.5|33.12|74371.79|33.72|-2.98%|
-
-Compared with v5.1.1, the Update Non-index performance of v5.2.0 is reduced by 1.98%.
-
-![Update Non-index](/media/sysbench_v511vsv520_update_non_index.png)
-
-### Update Index performance
-
-| Threads | v5.1.1 QPS | v5.1.1 95% latency (ms) | v5.2.0 QPS | v5.2.0 95% latency (ms) | QPS improvement |
-|:----------|:----------|:----------|:----------|:----------|:----------|
-|150|15641.04|13.22|15320|13.46|-2.05%|
-|300|19787.73|21.89|19161.35|22.69|-3.17%|
-|600|24566.74|36.89|23616.07|38.94|-3.87%|
-|900|27516.57|50.11|26270.04|54.83|-4.53%|
-|1200|29421.10|63.32|28002.65|69.29|-4.82%|
-|1500|30957.84|77.19|28624.44|95.81|-7.54%|
-
-Compared with v5.1.1, the Update Index performance of v5.2.0 is reduced by 4.33%.
-
-![Update Index](/media/sysbench_v511vsv520_update_index.png)
-
-### Read Write performance
-
-| Threads | v5.1.1 QPS | v5.1.1 95% latency (ms) | v5.2.0 QPS | v5.2.0 95% latency (ms) | QPS improvement |
-|:----------|:----------|:----------|:----------|:----------|:----------|
-|150|68471.02|57.87|69246|54.83|1.13%|
-|300|86573.09|97.55|85340.42|94.10|-1.42%|
-|600|101760.75|176.73|102221.31|173.58|0.45%|
-|900|111877.55|248.83|109276.45|257.95|-2.32%|
-|1200|117479.4|337.94|114231.33|344.08|-2.76%|
-|1500|119662.91|419.45|116663.28|434.83|-2.51%|
-
-Compared with v5.1.1, the Read Write performance of v5.2.0 is reduced by 1.24%.
- -![Read Write](/media/sysbench_v511vsv520_read_write.png) diff --git a/benchmark/benchmark-sysbench-v5.3.0-vs-v5.2.2.md b/benchmark/benchmark-sysbench-v5.3.0-vs-v5.2.2.md deleted file mode 100644 index 8759d0928e53e..0000000000000 --- a/benchmark/benchmark-sysbench-v5.3.0-vs-v5.2.2.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v5.3.0 vs. v5.2.2 -summary: TiDB v5.3.0 and v5.2.2 were compared in a Sysbench performance test for Online Transactional Processing (OLTP). Results show that v5.3.0 performance is nearly the same as v5.2.2. Point Select performance of v5.3.0 is reduced by 0.81%, Update Non-index performance is improved by 0.95%, Update Index performance is improved by 1.83%, and Read Write performance is reduced by 0.62%. ---- - -# TiDB Sysbench Performance Test Report -- v5.3.0 vs. v5.2.2 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v5.3.0 and TiDB v5.2.2 in the Online Transactional Processing (OLTP) scenario. The results show that the performance of v5.3.0 is nearly the same as that of v5.2.2. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.2.2 and v5.3.0 | -| TiDB | v5.2.2 and v5.3.0 | -| TiKV | v5.2.2 and v5.3.0 | -| Sysbench | 1.1.0-ead2689 | - -### Parameter configuration - -TiDB v5.3.0 and TiDB v5.2.2 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -raftdb.max-background-jobs: 4 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.unified.min-thread-count: 5 -readpool.unified.max-thread-count: 20 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -```yaml -global # Global configuration. - chroot /var/lib/haproxy # Changes the current directory and sets superuser privileges for the startup process to improve security. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # Same with the UID parameter. - group haproxy # Same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. 
- daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. - -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. - -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance roundrobin # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -## Test plan - -1. Deploy TiDB v5.3.0 and v5.2.2 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. For each concurrency under each workload, the test takes 20 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. 
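Step 5 above sends all traffic through HAProxy, which the configuration earlier in this report binds to port 3390. Before starting a run, a quick check that the proxy actually forwards to the TiDB servers can save a wasted 20-minute test. The check below is a sketch only; `$haproxy_host` is a hypothetical placeholder for the HAProxy node.

```bash
#!/bin/bash
# Sanity check: confirm HAProxy (bound to 0.0.0.0:3390 in the configuration above)
# forwards connections to the TiDB servers before starting the benchmark.
haproxy_host=10.0.0.1    # hypothetical address of the HAProxy node

mysql -h "$haproxy_host" -P 3390 -u root -ppassword -e "SELECT VERSION(), @@hostname;"
```

Running the query a few times should return different `@@hostname` values as the proxy rotates connections across the three TiDB instances.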
- -### Prepare test data - -Run the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Run the following command to perform the test: - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=1200 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v5.2.2 TPS | v5.3.0 TPS | v5.2.2 95% latency (ms) | v5.3.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|267673.17|267516.77|1.76|1.67|-0.06| -|600|369820.29|361672.56|2.91|2.97|-2.20| -|900|417143.31|416479.47|4.1|4.18|-0.16| - -Compared with v5.2.2, the Point Select performance of v5.3.0 is reduced slightly by 0.81%. - -![Point Select](/media/sysbench_v522vsv530_point_select.png) - -### Update Non-index performance - -| Threads | v5.2.2 TPS | v5.3.0 TPS | v5.2.2 95% latency (ms) | v5.3.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|39715.31|40041.03|11.87|12.08|0.82| -|600|50239.42|51110.04|20.74|20.37|1.73| -|900|57073.97|57252.74|28.16|27.66|0.31| - -Compared with v5.2.2, the Update Non-index performance of v5.3.0 is improved slightly by 0.95%. - -![Update Non-index](/media/sysbench_v522vsv530_update_non_index.png) - -### Update Index performance - -| Threads | v5.2.2 TPS | v5.3.0 TPS | v5.2.2 95% latency (ms) | v5.3.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|17634.03|17821.1|25.74|25.74|1.06| -|600|20998.59|21534.13|46.63|45.79|2.55| -|900|23420.75|23859.64|64.47|62.19|1.87| - -Compared with v5.2.2, the Update Index performance of v5.3.0 is improved slightly by 1.83%. - -![Update Index](/media/sysbench_v522vsv530_update_index.png) - -### Read Write performance - -| Threads | v5.2.2 TPS | v5.3.0 TPS | v5.2.2 95% latency (ms) | v5.3.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|3872.01|3848.63|106.75|106.75|-0.60| -|600|4514.17|4471.77|200.47|196.89|-0.94| -|900|4877.05|4861.45|287.38|282.25|-0.32| - -Compared with v5.2.2, the Read Write performance of v5.3.0 is reduced slightly by 0.62%. - -![Read Write](/media/sysbench_v522vsv530_read_write.png) \ No newline at end of file diff --git a/benchmark/benchmark-sysbench-v5.4.0-vs-v5.3.0.md b/benchmark/benchmark-sysbench-v5.4.0-vs-v5.3.0.md deleted file mode 100644 index c1b0c055e805f..0000000000000 --- a/benchmark/benchmark-sysbench-v5.4.0-vs-v5.3.0.md +++ /dev/null @@ -1,203 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v5.4.0 vs. v5.3.0 -summary: TiDB v5.4.0 shows improved performance of 2.59% to 4.85% in write-heavy workloads compared to v5.3.0. Results show performance improvements in point select, update non-index, update index, and read write scenarios. ---- - -# TiDB Sysbench Performance Test Report -- v5.4.0 vs. 
v5.3.0 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v5.4.0 and TiDB v5.3.0 in the Online Transactional Processing (OLTP) scenario. The results show that performance of v5.4.0 is improved by 2.59% ~ 4.85% in the write-heavy workload. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.3.0 and v5.4.0 | -| TiDB | v5.3.0 and v5.4.0 | -| TiKV | v5.3.0 and v5.4.0 | -| Sysbench | 1.1.0-ead2689 | - -### Parameter configuration - -TiDB v5.4.0 and TiDB v5.3.0 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -raftdb.max-background-jobs: 4 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.unified.min-thread-count: 5 -readpool.unified.max-thread-count: 20 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - chroot /var/lib/haproxy # Changes the current directory and sets superuser privileges for the startup process to improve security. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. 
-listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance roundrobin # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -## Test plan - -1. Deploy TiDB v5.4.0 and v5.3.0 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. For each concurrency under each workload, the test takes 20 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. - -### Prepare test data - -Run the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Run the following command to perform the test: - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=1200 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v5.3.0 TPS | v5.4.0 TPS | v5.3.0 95% latency (ms) | v5.4.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|266041.84|264345.73|1.96|2.07|-0.64| -|600|351782.71|348715.98|3.43|3.49|-0.87| -|900|386553.31|399777.11|5.09|4.74|3.42| - -Compared with v5.3.0, the Point Select performance of v5.4.0 is slightly improved by 0.64%. - -![Point Select](/media/sysbench_v530vsv540_point_select.png) - -### Update Non-index performance - -| Threads | v5.3.0 TPS | v5.4.0 TPS | v5.3.0 95% latency (ms) | v5.4.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|40804.31|41187.1|11.87|11.87|0.94| -|600|51239.4|53172.03|20.74|19.65|3.77| -|900|57897.56|59666.8|27.66|27.66|3.06| - -Compared with v5.3.0, the Update Non-index performance of v5.4.0 is improved by 2.59%. 
- -![Update Non-index](/media/sysbench_v530vsv540_update_non_index.png) - -### Update Index performance - -| Threads | v5.3.0 TPS | v5.4.0 TPS | v5.3.0 95% latency (ms) | v5.4.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|17737.82|18716.5|26.2|24.83|5.52| -|600|21614.39|22670.74|44.98|42.61|4.89| -|900|23933.7|24922.05|62.19|61.08|4.13| - -Compared with v5.3.0, the Update Index performance of v5.4.0 is improved by 4.85%. - -![Update Index](/media/sysbench_v530vsv540_update_index.png) - -### Read Write performance - -| Threads | v5.3.0 TPS | v5.4.0 TPS | v5.3.0 95% latency (ms) | v5.4.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|3810.78|3929.29|108.68|106.75|3.11| -|600|4514.28|4684.64|193.38|186.54|3.77| -|900|4842.49|4988.49|282.25|277.21|3.01| - -Compared with v5.3.0, the Read Write performance of v5.4.0 is improved by 3.30%. - -![Read Write](/media/sysbench_v530vsv540_read_write.png) diff --git a/benchmark/benchmark-sysbench-v6.0.0-vs-v5.4.0.md b/benchmark/benchmark-sysbench-v6.0.0-vs-v5.4.0.md deleted file mode 100644 index 0af44a97299aa..0000000000000 --- a/benchmark/benchmark-sysbench-v6.0.0-vs-v5.4.0.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v6.0.0 vs. v5.4.0 -summary: TiDB v6.0.0 shows a 16.17% improvement in read-write workload performance compared to v5.4.0. Other workloads show similar performance between the two versions. Test results show performance comparisons for point select, update non-index, update index, and read-write workloads. ---- - -# TiDB Sysbench Performance Test Report -- v6.0.0 vs. v5.4.0 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v6.0.0 and TiDB v5.4.0 in the Online Transactional Processing (OLTP) scenario. The results show that performance of v6.0.0 is significantly improved by 16.17% in the read-write workload. The performance of other workload is basically the same as in v5.4.0. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.4.0 and v6.0.0 | -| TiDB | v5.4.0 and v6.0.0 | -| TiKV | v5.4.0 and v6.0.0 | -| Sysbench | 1.1.0-df89d34 | - -### Parameter configuration - -TiDB v6.0.0 and TiDB v5.4.0 use the same configuration. 
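Step 1 of the test plan later in this report deploys both clusters with TiUP. Before applying the configuration in the following subsections, it can be worth confirming that every PD, TiKV, and TiDB instance is up; a sketch, again using a hypothetical cluster name:

```bash
# Show the status of every component of the deployed cluster
# (bench-cluster is a hypothetical name chosen at deployment time).
tiup cluster display bench-cluster
```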
- -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -raftdb.max-background-jobs: 4 -raftdb.allow-concurrent-memtable-write: true -server.grpc-concurrency: 6 -readpool.storage.normal-concurrency: 10 -pessimistic-txn.pipelined: true -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance leastconn # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -## Test plan - -1. Deploy TiDB v6.0.0 and v5.4.0 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. 
Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. For each concurrency under each workload, the test takes 20 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. - -### Prepare test data - -Run the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Run the following command to perform the test: - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=1200 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v5.4.0 TPS | v6.0.0 TPS | v5.4.0 95% latency (ms) | v6.0.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|260085.19|265207.73|1.82|1.93|1.97| -|600|378098.48|365173.66|2.48|2.61|-3.42| -|900|441294.61|424031.23|3.75|3.49|-3.91| - -Compared with v5.4.0, the Point Select performance of v6.0.0 is slightly dropped by 1.79%. - -![Point Select](/media/sysbench_v540vsv600_point_select.png) - -### Update Non-index performance - -| Threads | v5.4.0 TPS | v6.0.0 TPS | v5.4.0 95% latency (ms) | v6.0.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|41528.7|40814.23|11.65|11.45|-1.72| -|600|53220.96|51746.21|19.29|20.74|-2.77| -|900|59977.58|59095.34|26.68|28.16|-1.47| - -Compared with v5.4.0, the Update Non-index performance of v6.0.0 is slightly dropped by 1.98%. - -![Update Non-index](/media/sysbench_v540vsv600_update_non_index.png) - -### Update Index performance - -| Threads | v5.4.0 TPS | v6.0.0 TPS | v5.4.0 95% latency (ms) | v6.0.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|18659.11|18187.54|23.95|25.74|-2.53| -|600|23195.83|22270.81|40.37|44.17|-3.99| -|900|25798.31|25118.78|56.84|57.87|-2.63| - -Compared with v5.4.0, the Update Index performance of v6.0.0 is dropped by 3.05%. - -![Update Index](/media/sysbench_v540vsv600_update_index.png) - -### Read Write performance - -| Threads | v5.4.0 TPS | v6.0.0 TPS | v5.4.0 95% latency (ms) | v6.0.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|4141.72|4829.01|97.55|82.96|16.59| -|600|4892.76|5693.12|173.58|153.02|16.36| -|900|5217.94|6029.95|257.95|235.74|15.56| - -Compared with v5.4.0, the Read Write performance of v6.0.0 is significantly improved by 16.17%. 
- -![Read Write](/media/sysbench_v540vsv600_read_write.png) diff --git a/benchmark/benchmark-sysbench-v6.1.0-vs-v6.0.0.md b/benchmark/benchmark-sysbench-v6.1.0-vs-v6.0.0.md deleted file mode 100644 index a61d18baf57ea..0000000000000 --- a/benchmark/benchmark-sysbench-v6.1.0-vs-v6.0.0.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v6.1.0 vs. v6.0.0 -summary: TiDB v6.1.0 shows improved performance in write-heavy workloads compared to v6.0.0, with a 2.33% ~ 4.61% improvement. The test environment includes AWS EC2 instances and Sysbench 1.1.0-df89d34. Both versions use the same parameter configuration. Test plan involves deploying, importing data, and performing stress tests. Results show slight drop in Point Select performance, while Update Non-index, Update Index, and Read Write performance are improved by 2.90%, 4.61%, and 2.23% respectively. ---- - -# TiDB Sysbench Performance Test Report -- v6.1.0 vs. v6.0.0 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v6.1.0 and TiDB v6.0.0 in the Online Transactional Processing (OLTP) scenario. The results show that performance of v6.1.0 is improved in the write workload. The performance of write-heavy workload is improved by 2.33% ~ 4.61%. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v6.0.0 and v6.1.0 | -| TiDB | v6.0.0 and v6.1.0 | -| TiKV | v6.0.0 and v6.1.0 | -| Sysbench | 1.1.0-df89d34 | - -### Parameter configuration - -TiDB v6.1.0 and TiDB v6.0.0 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -readpool.storage.normal-concurrency: 10 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -set global tidb_prepared_plan_cache_size=1000; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. 
It can be disabled by the command line "-db" argument. - -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. - -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance leastconn # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -## Test plan - -1. Deploy TiDB v6.1.0 and v6.0.0 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. For each concurrency under each workload, the test takes 20 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. - -### Prepare test data - -Run the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Run the following command to perform the test: - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=1200 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v6.0.0 TPS | v6.1.0 TPS | v6.0.0 95% latency (ms) | v6.1.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|268934.84|265353.15|1.89|1.96|-1.33| -|600|365217.96|358976.94|2.57|2.66|-1.71| -|900|420799.64|407625.11|3.68|3.82|-3.13| - -Compared with v6.0.0, the Point Select performance of v6.1.0 slightly drops by 2.1%. 
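The TPS and 95% latency columns in these tables come from the summary that Sysbench prints at the end of each run. A rough sketch of pulling those two figures out of a saved run log (the file name is hypothetical, and the field positions follow the Sysbench 1.x text output):

```bash
#!/bin/bash
# Extract throughput and 95th-percentile latency from a saved sysbench summary.
log=sysbench_oltp_point_select_300.log   # hypothetical capture of "sysbench ... run" output

awk '
    /transactions:/    { gsub(/[()]/, ""); print "TPS:", $(NF-2) }
    /^ *queries:/      { gsub(/[()]/, ""); print "QPS:", $(NF-2) }
    /95th percentile:/ { print "p95 latency (ms):", $NF }
' "$log"
```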
- -![Point Select](/media/sysbench_v600vsv610_point_select.png) - -### Update Non-index performance - -| Threads | v6.0.0 TPS | v6.1.0 TPS | v6.0.0 95% latency (ms) | v6.1.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|41778.95|42991.9|11.24|11.45|2.90 | -|600|52045.39|54099.58|20.74|20.37|3.95| -|900|59243.35|62084.65|27.66|26.68|4.80| - -Compared with v6.0.0, the Update Non-index performance of v6.1.0 is improved by 3.88%. - -![Update Non-index](/media/sysbench_v600vsv610_update_non_index.png) - -### Update Index performance - -| Threads | v6.0.0 TPS | v6.1.0 TPS | v6.0.0 95% latency (ms) | v6.1.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|18085.79|19198.89|25.28|23.95|6.15| -|600|22210.8|22877.58|42.61|41.85|3.00| -|900|25249.81|26431.12|55.82|53.85|4.68| - -Compared with v6.0.0, the Update Index performance of v6.1.0 is improved by 4.61%. - -![Update Index](/media/sysbench_v600vsv610_update_index.png) - -### Read Write performance - -| Threads | v6.0.0 TPS | v6.1.0 TPS | v6.0.0 95% latency (ms) | v6.1.0 95% latency (ms) | TPS improvement (%) | -|:----------|:----------|:----------|:----------|:----------|:----------| -|300|4856.23|4914.11|84.47|82.96|1.19| -|600|5676.46|5848.09|161.51|150.29|3.02| -|900|6072.97|6223.95|240.02|223.34|2.49| - -Compared with v6.0.0, the Read Write performance of v6.1.0 is improved by 2.23%. - -![Read Write](/media/sysbench_v600vsv610_read_write.png) diff --git a/benchmark/benchmark-sysbench-v6.2.0-vs-v6.1.0.md b/benchmark/benchmark-sysbench-v6.2.0-vs-v6.1.0.md deleted file mode 100644 index f98bbf3130b13..0000000000000 --- a/benchmark/benchmark-sysbench-v6.2.0-vs-v6.1.0.md +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: TiDB Sysbench Performance Test Report -- v6.2.0 vs. v6.1.0 -summary: TiDB v6.2.0 and v6.1.0 show similar performance in the Sysbench test. Point Select performance slightly drops by 3.58%. Update Non-index and Update Index performance are basically unchanged, reduced by 0.85% and 0.47% respectively. Read Write performance is reduced by 1.21%. ---- - -# TiDB Sysbench Performance Test Report -- v6.2.0 vs. v6.1.0 - -## Test overview - -This test aims at comparing the Sysbench performance of TiDB v6.2.0 and TiDB v6.1.0 in the Online Transactional Processing (OLTP) scenario. The results show that performance of v6.2.0 is basically the same as that of v6.1.0. The performance of Point Select slightly drops by 3.58%. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| Sysbench | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v6.1.0 and v6.2.0 | -| TiDB | v6.1.0 and v6.2.0 | -| TiKV | v6.1.0 and v6.2.0 | -| Sysbench | 1.1.0-df89d34 | - -### Parameter configuration - -TiDB v6.2.0 and TiDB v6.1.0 use the same configuration. 
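Both versions are given the settings listed in the following subsections. Before loading data, it can be worth confirming that they actually took effect on the running cluster; the check below is a sketch using standard TiDB statements (`SHOW GLOBAL VARIABLES`, `SHOW CONFIG`) and the same connection placeholders as the Sysbench commands.

```bash
#!/bin/bash
# Sketch: verify that the global variables and instance configuration below were applied.
mysql -h "$aws_nlb_host" -P "$aws_nlb_port" -u root -ppassword -e "
    SHOW GLOBAL VARIABLES LIKE 'tidb_enable_async_commit';
    SHOW GLOBAL VARIABLES LIKE 'tidb_enable_1pc';
    SHOW GLOBAL VARIABLES LIKE 'tidb_enable_clustered_index';
    SHOW CONFIG WHERE type = 'tikv' AND name = 'raftstore.store-pool-size';
"
```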
- -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -storage.scheduler-worker-pool-size: 5 -raftstore.store-pool-size: 3 -raftstore.apply-pool-size: 3 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -readpool.unified.max-thread-count: 10 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -set global tidb_prepared_plan_cache_size=1000; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. - -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. - -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance leastconn # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -## Test plan - -1. Deploy TiDB v6.2.0 and v6.1.0 using TiUP. -2. Use Sysbench to import 16 tables, each table with 10 million rows of data. -3. Execute the `analyze table` statement on each table. -4. 
Back up the data used for restore before different concurrency tests, which ensures data consistency for each test. -5. Start the Sysbench client to perform the `point_select`, `read_write`, `update_index`, and `update_non_index` tests. Perform stress tests on TiDB via HAProxy. For each concurrency under each workload, the test takes 20 minutes. -6. After each type of test is completed, stop the cluster, overwrite the cluster with the backup data in step 4, and restart the cluster. - -### Prepare test data - -Run the following command to prepare the test data: - -{{< copyable "shell-regular" >}} - -```bash -sysbench oltp_common \ - --threads=16 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - --mysql-user=root \ - --mysql-password=password \ - prepare --tables=16 --table-size=10000000 -``` - -### Perform the test - -Run the following command to perform the test: - -{{< copyable "shell-regular" >}} - -```bash -sysbench $testname \ - --threads=$threads \ - --time=1200 \ - --report-interval=1 \ - --rand-type=uniform \ - --db-driver=mysql \ - --mysql-db=sbtest \ - --mysql-host=$aws_nlb_host \ - --mysql-port=$aws_nlb_port \ - run --tables=16 --table-size=10000000 -``` - -## Test results - -### Point Select performance - -| Threads | v6.1.0 TPS | v6.2.0 TPS | v6.1.0 95% latency (ms) | v6.2.0 95% latency (ms) | TPS improvement (%) | -| :------ | :--------- | :--------- | :---------------------- | :---------------------- | :----------- | -| 300 | 243530.01 | 236885.24 | 1.93 | 2.07 | -2.73 | -| 600 | 304121.47 | 291395.84 | 3.68 | 4.03 | -4.18 | -| 900 | 327301.23 | 314720.02 | 5 | 5.47 | -3.84 | - -Compared with v6.1.0, the Point Select performance of v6.2.0 slightly drops by 3.58%. - -![Point Select](/media/sysbench_v610vsv620_point_select.png) - -### Update Non-index performance - -| Threads | v6.1.0 TPS | v6.2.0 TPS | v6.1.0 95% latency (ms) | v6.2.0 95% latency (ms) | TPS improvement (%) | -| :------ | :--------- | :--------- | :---------------------- | :---------------------- | :----------- | -| 300 | 42608.8 | 42372.82 | 11.45 | 11.24 | -0.55 | -| 600 | 54264.47 | 53672.69 | 18.95 | 18.95 | -1.09 | -| 900 | 60667.47 | 60116.14 | 26.2 | 26.68 | -0.91 | - -Compared with v6.1.0, the Update Non-index performance of v6.2.0 is basically unchanged, reduced by 0.85%. - -![Update Non-index](/media/sysbench_v610vsv620_update_non_index.png) - -### Update Index performance - -| Threads | v6.1.0 TPS | v6.2.0 TPS | v6.1.0 95% latency (ms) | v6.2.0 95% latency (ms) | TPS improvement (%) | -| :------ | :--------- | :--------- | :---------------------- | :---------------------- | :----------- | -| 300 | 19384.75 | 19353.58 | 23.52 | 23.52 | -0.16 | -| 600 | 24144.78 | 24007.57 | 38.25 | 37.56 | -0.57 | -| 900 | 26770.9 | 26589.84 | 51.94 | 52.89 | -0.68 | - -Compared with v6.1.0, the Update Index performance of v6.2.0 is basically unchanged, reduced by 0.47%. - -![Update Index](/media/sysbench_v610vsv620_update_index.png) - -### Read Write performance - -| Threads | v6.1.0 TPS | v6.2.0 TPS | v6.1.0 95% latency (ms) | v6.2.0 95% latency (ms) | TPS improvement (%) | -| :------ | :--------- | :--------- | :---------------------- | :---------------------- | :----------- | -| 300 | 4849.67 | 4797.59 | 86 | 84.47 | -1.07 | -| 600 | 5643.89 | 5565.17 | 161.51 | 161.51 | -1.39 | -| 900 | 5954.91 | 5885.22 | 235.74 | 235.74 | -1.17 | - -Compared with v6.1.0, the Read Write performance of v6.2.0 is reduced by 1.21%. 
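The matrix of workloads and concurrencies reported above can be driven by a small wrapper around the `sysbench ... run` template shown in the previous section. The wrapper below is not part of the published procedure: the `oltp_*` names are the standard sysbench Lua script names assumed to correspond to the tests in the test plan, and the credentials mirror the prepare step:

```bash
for testname in oltp_point_select oltp_read_write oltp_update_index oltp_update_non_index; do
  for threads in 300 600 900; do
    sysbench $testname \
      --threads=$threads \
      --time=1200 \
      --report-interval=1 \
      --rand-type=uniform \
      --db-driver=mysql \
      --mysql-db=sbtest \
      --mysql-host=$aws_nlb_host \
      --mysql-port=$aws_nlb_port \
      --mysql-user=root \
      --mysql-password=password \
      run --tables=16 --table-size=10000000 | tee "${testname}_${threads}.log"  # keep raw output per run
  done
done
```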
- -![Read Write](/media/sysbench_v610vsv620_read_write.png) diff --git a/benchmark/v5.0-performance-benchmarking-with-tpcc.md b/benchmark/v5.0-performance-benchmarking-with-tpcc.md deleted file mode 100644 index 5472e67664cca..0000000000000 --- a/benchmark/v5.0-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v5.0 vs. v4.0 -summary: TiDB v5.0 outperforms v4.0 in TPC-C performance, showing a 36% increase. ---- - -# TiDB TPC-C Performance Test Report -- v5.0 vs. v4.0 - -## Test purpose - -This test aims at comparing the TPC-C performance of TiDB v5.0 and TiDB v4.0 in the Online Transactional Processing (OLTP) scenario. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | 4.0 and 5.0 | -| TiDB | 4.0 and 5.0 | -| TiKV | 4.0 and 5.0 | -| BenchmarkSQL | None | - -### Parameter configuration - -#### TiDB v4.0 configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV v4.0 configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -readpool.unified.max-thread-count: 20 -readpool.unified.min-thread-count: 5 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -storage.scheduler-worker-pool-size: 20 -``` - -#### TiDB v5.0 configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV v5.0 configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -readpool.unified.max-thread-count: 20 -readpool.unified.min-thread-count: 5 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -storage.scheduler-worker-pool-size: 20 -server.enable-request-batch: false -``` - -#### TiDB v4.0 global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -``` - -#### TiDB v5.0 global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -## Test plan - -1. Deploy TiDB v5.0 and v4.0 using TiUP. - -2. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data. - - 1. Compile BenchmarkSQL: - - {{< copyable "bash" >}} - - ```bash - git clone https://github.com/pingcap/benchmarksql && cd benchmarksql && ant - ``` - - 2. 
Enter the `run` directory, edit the `props.mysql` file according to the actual situation, and modify the `conn`, `warehouses`, `loadWorkers`, `terminals`, and `runMins` configuration items. - - 3. Execute the `runSQL.sh ./props.mysql sql.mysql/tableCreates.sql` command. - - 4. Execute the `runSQL.sh ./props.mysql sql.mysql/indexCreates.sql` command. - - 5. Run MySQL client and execute the `analyze table` statement on every table. - -3. Execute the `runBenchmark.sh ./props.mysql` command. - -4. Extract the tpmC data of New Order from the result. - -## Test result - -According to the test statistics, the TPC-C performance of TiDB v5.0 has **increased by 36%** compared with that of TiDB v4.0. - -![TPC-C](/media/tpcc_v5vsv4_corrected_v2.png) diff --git a/benchmark/v5.1-performance-benchmarking-with-tpcc.md b/benchmark/v5.1-performance-benchmarking-with-tpcc.md deleted file mode 100644 index a9d8a26503b69..0000000000000 --- a/benchmark/v5.1-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v5.1.0 vs. v5.0.2 -summary: TiDB v5.1.0 TPC-C performance is 2.8% better than v5.0.2. Parameter configuration is the same for both versions. Test plan includes deployment, database creation, data import, stress testing, and result extraction. ---- - -# TiDB TPC-C Performance Test Report -- v5.1.0 vs. v5.0.2 - -## Test overview - -This test aims to compare the TPC-H performance of TiDB v5.1.0 and TiDB v5.0.2 in the online analytical processing (OLAP) scenario. The results show that compared with v5.0.2, the TPC-C performance of v5.1.0 is improved by 2.8%. - -## Test environment (AWS EC2) - -## Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.0.2 and v5.1.0 | -| TiDB | v5.0.2 and v5.1.0 | -| TiKV | v5.0.2 and v5.1.0 | -| TiUP | 1.5.1 | - -### Parameter configuration - -TiDB v5.1.0 and TiDB v5.0.2 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -readpool.unified.max-thread-count: 20 -readpool.unified.min-thread-count: 5 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -storage.scheduler-worker-pool-size: 20 -server.enable-request-batch: false -``` - -#### TiDB global variable configuration - -{{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -## Test plan - -1. Deploy TiDB v5.1.0 and v5.0.2 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. 
Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouses 5000 --db tpcc -H 127.0.0.1 -p 4000`. -4. Execute the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 300s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v5.0.2, the TPC-C performance of v5.1.0 is **improved by 2.8%**. - -![TPC-C](/media/tpcc_v510_vs_v502.png) diff --git a/benchmark/v5.2-performance-benchmarking-with-tpcc.md b/benchmark/v5.2-performance-benchmarking-with-tpcc.md deleted file mode 100644 index 8d26565e7f607..0000000000000 --- a/benchmark/v5.2-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,93 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v5.2.0 vs. v5.1.1 -summary: TiDB v5.2.0 TPC-C performance is 4.22% lower than v5.1.1. Test environment AWS EC2. Hardware and software configurations are the same for both versions. Test plan includes deployment, database creation, data import, stress testing, and result extraction. ---- - -# TiDB TPC-C Performance Test Report -- v5.2.0 vs. v5.1.1 - -## Test overview - -This test aims to compare the TPC-C performance of TiDB v5.2.0 and TiDB v5.1.1 in the online transactional processing (OLTP) scenario. The results show that compared with v5.1.1, the TPC-C performance of v5.2.0 is reduced by 4.22%. - -## Test environment (AWS EC2) - -## Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.1.1 and v5.2.0 | -| TiDB | v5.1.1 and v5.2.0 | -| TiKV | v5.1.1 and v5.2.0 | -| TiUP | 1.5.1 | - -### Parameter configuration - -TiDB v5.2.0 and TiDB v5.1.1 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -readpool.unified.max-thread-count: 20 -readpool.unified.min-thread-count: 5 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -storage.scheduler-worker-pool-size: 20 -server.enable-request-batch: false -``` - -#### TiDB global variable configuration - -{{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -## Test plan - -1. Deploy TiDB v5.2.0 and v5.1.1 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouses 5000 --db tpcc -H 127.0.0.1 -p 4000`. -4. 
Execute the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 300s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v5.1.1, the TPC-C performance of v5.2.0 is **reduced by 4.22%**. - -![TPC-C](/media/tpcc_v511_vs_v520.png) diff --git a/benchmark/v5.3-performance-benchmarking-with-tpcc.md b/benchmark/v5.3-performance-benchmarking-with-tpcc.md deleted file mode 100644 index a5a4c35261850..0000000000000 --- a/benchmark/v5.3-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v5.3.0 vs. v5.2.2 -summary: TiDB v5.3.0 TPC-C performance is slightly reduced by 2.99% compared to v5.2.2. The test used AWS EC2 with specific hardware and software configurations. The test plan involved deploying TiDB, creating a database, importing data, and running stress tests. The result showed a decrease in performance across different thread counts. ---- - -# TiDB TPC-C Performance Test Report -- v5.3.0 vs. v5.2.2 - -## Test overview - -This test aims at comparing the TPC-C performance of TiDB v5.3.0 and TiDB v5.2.2 in the online transactional processing (OLTP) scenario. The result shows that compared with v5.2.2, the TPC-C performance of v5.3.0 is reduced by 2.99%. - -## Test environment (AWS EC2) - -## Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.2.2 and v5.3.0 | -| TiDB | v5.2.2 and v5.3.0 | -| TiKV | v5.2.2 and v5.3.0 | -| TiUP | 1.5.1 | - -### Parameter configuration - -TiDB v5.3.0 and TiDB v5.2.2 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -readpool.unified.max-thread-count: 20 -readpool.unified.min-thread-count: 5 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -storage.scheduler-worker-pool-size: 20 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -```yaml -global # Global configuration. - chroot /var/lib/haproxy # Changes the current directory and sets superuser privileges for the startup process to improve security. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. 
- maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # Same with the UID parameter. - group haproxy # Same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. - -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. - -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance roundrobin # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -## Test plan - -1. Deploy TiDB v5.3.0 and v5.2.2 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouses 5000 --db tpcc -H 127.0.0.1 -p 4000`. -4. Run the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 1800s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. For each concurrency, the test takes 30 minutes. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v5.2.2, the TPC-C performance of v5.3.0 is **reduced slightly by 2.99%**. - -| Threads | v5.2.2 tpmC | v5.3.0 tpmC | tpmC improvement (%) | -|:----------|:----------|:----------|:----------| -|50|42228.8|41580|-1.54| -|100|49400|48248.2|-2.33| -|200|54436.6|52809.4|-2.99| -|400|57026.7|54117.1|-5.10| - -![TPC-C](/media/tpcc_v522_vs_v530.png) \ No newline at end of file diff --git a/benchmark/v5.4-performance-benchmarking-with-tpcc.md b/benchmark/v5.4-performance-benchmarking-with-tpcc.md deleted file mode 100644 index 05be43f4ca314..0000000000000 --- a/benchmark/v5.4-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,129 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v5.4.0 vs. v5.3.0 -summary: TiDB v5.4.0 TPC-C performance is 3.16% better than v5.3.0. 
The improvement is consistent across different thread counts 2.80% (50 threads), 4.27% (100 threads), 3.45% (200 threads), and 2.11% (400 threads). ---- - -# TiDB TPC-C Performance Test Report -- v5.4.0 vs. v5.3.0 - -## Test overview - -This test aims at comparing the TPC-C performance of TiDB v5.4.0 and v5.3.0 in the Online Transactional Processing (OLTP) scenario. The results show that compared with v5.3.0, the TPC-C performance of v5.4.0 is improved by 3.16%. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| PD | v5.3.0 and v5.4.0 | -| TiDB | v5.3.0 and v5.4.0 | -| TiKV | v5.3.0 and v5.4.0 | -| TiUP | 1.5.1 | - -### Parameter configuration - -TiDB v5.4.0 and TiDB v5.3.0 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -performance.max-procs: 20 -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -readpool.unified.max-thread-count: 20 -readpool.unified.min-thread-count: 5 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -storage.scheduler-worker-pool-size: 20 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - chroot /var/lib/haproxy # Changes the current directory and sets superuser privileges for the startup process to improve security. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. 
It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance roundrobin # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -### Prepare test data - -1. Deploy TiDB v5.4.0 and v5.3.0 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouses 5000 --db tpcc -H 127.0.0.1 -P 4000`. -4. Run the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 1800s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. For each concurrency, the test takes 30 minutes. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v5.3.0, the TPC-C performance of v5.4.0 is **improved by 3.16%**. - -| Threads | v5.3.0 tpmC | v5.4.0 tpmC | tpmC improvement (%) | -|:----------|:----------|:----------|:----------| -|50|43002.4|44204.4|2.80| -|100|50162.7|52305|4.27| -|200|55768.2|57690.7|3.45| -|400|56836.8|58034.6|2.11| - -![TPC-C](/media/tpcc_v530_vs_v540.png) diff --git a/benchmark/v5.4-performance-benchmarking-with-tpch.md b/benchmark/v5.4-performance-benchmarking-with-tpch.md deleted file mode 100644 index c459c56268f35..0000000000000 --- a/benchmark/v5.4-performance-benchmarking-with-tpch.md +++ /dev/null @@ -1,128 +0,0 @@ ---- -title: TiDB TPC-H Performance Test Report -- v5.4 MPP mode vs. Greenplum 6.15.0 and Apache Spark 3.1.1 -summary: TiDB v5.4 MPP mode outperforms Greenplum 6.15.0 and Apache Spark 3.1.1 in TPC-H 100 GB performance test. TiDB's MPP mode is 2-3 times faster. Test results show TiDB v5.4 has significantly lower query execution times compared to Greenplum and Apache Spark. ---- - -# TiDB TPC-H Performance Test Report -- TiDB v5.4 MPP mode vs. Greenplum 6.15.0 and Apache Spark 3.1.1 - -## Test overview - -This test aims at comparing the TPC-H 100 GB performance of TiDB v5.4 in the MPP mode with that of Greenplum and Apache Spark, two mainstream analytics engines, in their latest versions. The test result shows that the performance of TiDB v5.4 in the MPP mode is two to three times faster than that of the other two solutions under TPC-H workload. - -In v5.0, TiDB introduces the MPP mode for [TiFlash](/tiflash/tiflash-overview.md), which significantly enhances TiDB's Hybrid Transactional and Analytical Processing (HTAP) capabilities. 
Test objects in this report are as follows: - -+ TiDB v5.4 columnar storage in the MPP mode -+ Greenplum 6.15.0 -+ Apache Spark 3.1.1 + Parquet - -## Test environment - -### Hardware prerequisite - -| Instance type | Instance count | -|:----------|:----------| -| PD | 1 | -| TiDB | 1 | -| TiKV | 3 | -| TiFlash | 3 | - -+ CPU: Intel(R) Xeon(R) CPU E5-2630 v4 @ 2.20GHz, 40 cores -+ Memory: 189 GB -+ Disks: NVMe 3TB * 2 - -### Software version - -| Service type | Software version | -|:----------|:-----------| -| TiDB | 5.4 | -| Greenplum | 6.15.0 | -| Apache Spark | 3.1.1 | - -### Parameter configuration - -#### TiDB v5.4 - -For the v5.4 cluster, TiDB uses the default parameter configuration except for the following configuration items. - -In the configuration file `users.toml` of TiFlash, configure `max_memory_usage` as follows: - -```toml -[profiles.default] -max_memory_usage = 10000000000000 -``` - -Set session variables with the following SQL statements: - -```sql -set @@tidb_isolation_read_engines='tiflash'; -set @@tidb_allow_mpp=1; -set @@tidb_mem_quota_query = 10 << 30; -``` - -All TPC-H test tables are replicated to TiFlash in columnar format, with no additional partitions or indexes. - -#### Greenplum - -Except for the initial 3 nodes, the Greenplum cluster is deployed using an additional master node. Each segment server contains 8 segments, which means 4 segments per NVMe SSD. So there are 24 segments in total. The storage format is append-only/column-oriented storage and partition keys are used as primary keys. - -{{< copyable "" >}} - -``` -log_statement = all -gp_autostats_mode = none -statement_mem = 2048MB -gp_vmem_protect_limit = 16384 -``` - -#### Apache Spark - -The test of Apache Spark uses Apache Parquet as the storage format and stores the data on HDFS. The HDFS system consists of three nodes. Each node has two assigned NVMe SSD disks as the data disks. The Spark cluster is deployed in standalone mode, using NVMe SSD disks as the local directory of `spark.local.dir` to speed up the shuffle spill, with no additional partitions or indexes. - -{{< copyable "" >}} - -``` ---driver-memory 20G ---total-executor-cores 120 ---executor-cores 5 ---executor-memory 15G -``` - -## Test result - -> **Note:** -> -> The following test results are the average data of three tests. All numbers are in seconds. - -| Query ID | TiDB v5.4 | Greenplum 6.15.0 | Apache Spark 3.1.1 + Parquet | -| :-------- | :----------- | :------------ | :-------------- | -| 1 | 8.08 | 64.1307 | 52.64 | -| 2 | 2.53 | 4.76612 | 11.83 | -| 3 | 4.84 | 15.62898 | 13.39 | -| 4 | 10.94 | 12.88318 | 8.54 | -| 5 | 12.27 | 23.35449 | 25.23 | -| 6 | 1.32 | 6.033 | 2.21 | -| 7 | 5.91 | 12.31266 | 25.45 | -| 8 | 6.71 | 11.82444 | 23.12 | -| 9 | 44.19 | 22.40144 | 35.2 | -| 10 | 7.13 | 12.51071 | 12.18 | -| 11 | 2.18 | 2.6221 | 10.99 | -| 12 | 2.88 | 7.97906 | 6.99 | -| 13 | 6.84 | 10.15873 | 12.26 | -| 14 | 1.69 | 4.79394 | 3.89 | -| 15 | 3.29 | 10.48785 | 9.82 | -| 16 | 5.04 | 4.64262 | 6.76 | -| 17 | 11.7 | 74.65243 | 44.65 | -| 18 | 12.87 | 64.87646 | 30.27 | -| 19 | 4.75 | 8.08625 | 4.7 | -| 20 | 8.89 | 15.47016 | 8.4 | -| 21 | 24.44 | 39.08594 | 34.83 | -| 22 | 1.23 | 7.67476 | 4.59 | - -![TPC-H](/media/tidb-v5.4-tpch-100-vs-gp-spark.png) - -In the performance diagram above: - -- Blue lines represent TiDB v5.4; -- Red lines represent Greenplum 6.15.0; -- Yellow lines represent Apache Spark 3.1.1. -- The y-axis represents the execution time of the query. The less the time is, the better the performance is. 
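As noted in the parameter configuration section, all TPC-H test tables are replicated to TiFlash in columnar format. A minimal sketch of creating such replicas is shown below; the `tpch` schema name, the replica count of 2, and the connection details are assumptions rather than settings documented in this report:

```bash
# Create a TiFlash replica for each standard TPC-H table, then wait for replication.
for tbl in nation region part supplier partsupp customer orders lineitem; do
  mysql -h 127.0.0.1 -P 4000 -u root -e "ALTER TABLE tpch.${tbl} SET TIFLASH REPLICA 2;"
done
# Replication progress can be checked in information_schema.tiflash_replica (AVAILABLE = 1 when ready).
```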
diff --git a/benchmark/v6.0-performance-benchmarking-with-tpcc.md b/benchmark/v6.0-performance-benchmarking-with-tpcc.md deleted file mode 100644 index e8fa4c4a9e278..0000000000000 --- a/benchmark/v6.0-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,125 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v6.0.0 vs. v5.4.0 -summary: TiDB v6.0.0 TPC-C performance is 24.20% better than v5.4.0. The improvement is consistent across different thread counts, with the highest improvement at 26.97% for 100 threads. ---- - -# TiDB TPC-C Performance Test Report -- v6.0.0 vs. v5.4.0 - -## Test overview - -This test aims at comparing the TPC-C performance of TiDB v6.0.0 and v5.4.0 in the Online Transactional Processing (OLTP) scenario. The results show that compared with v5.4.0, the TPC-C performance of v6.0.0 is improved by 24.20%. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -| :----------- | :---------------- | -| PD | v5.4.0 and v6.0.0 | -| TiDB | v5.4.0 and v6.0.0 | -| TiKV | v5.4.0 and v6.0.0 | -| TiUP | 1.9.3 | -| HAProxy | 2.5.0 | - -### Parameter configuration - -TiDB v6.0.0 and TiDB v5.4.0 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -pessimistic-txn.pipelined: true -raftdb.allow-concurrent-memtable-write: true -raftdb.max-background-jobs: 4 -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 3 -readpool.storage.normal-concurrency: 10 -rocksdb.max-background-jobs: 8 -server.grpc-concurrency: 6 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. 
If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance leastconn # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -### Prepare test data - -1. Deploy TiDB v6.0.0 and v5.4.0 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouse 5000 --db tpcc -H 127.0.0.1 -p 4000`. -4. Run the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 1800s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. For each concurrency, the test takes 30 minutes. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v5.4.0, the TPC-C performance of v6.0.0 is **improved by 24.20%**. - -| Threads | v5.4.0 tpmC | v6.0.0 tpmC | tpmC improvement (%) | -|:----------|:----------|:----------|:----------| -|50|44822.8|54956.6|22.61| -|100|52150.3|66216.6|26.97| -|200|57344.9|72116.7|25.76| -|400|58675|71254.8|21.44| - -![TPC-C](/media/tpcc_v540_vs_v600.png) diff --git a/benchmark/v6.0-performance-benchmarking-with-tpch.md b/benchmark/v6.0-performance-benchmarking-with-tpch.md deleted file mode 100644 index b22e4e3b0afab..0000000000000 --- a/benchmark/v6.0-performance-benchmarking-with-tpch.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Performance Comparison between TiFlash and Greenplum/Spark -summary: Performance Comparison between TiFlash and Greenplum/Spark. Refer to TiDB v5.4 TPC-H performance benchmarking report for details. ---- - -# Performance Comparison between TiFlash and Greenplum/Spark - -Refer to [TiDB v5.4 TPC-H performance benchmarking report](https://docs.pingcap.com/tidb/stable/v5.4-performance-benchmarking-with-tpch). \ No newline at end of file diff --git a/benchmark/v6.1-performance-benchmarking-with-tpcc.md b/benchmark/v6.1-performance-benchmarking-with-tpcc.md deleted file mode 100644 index 17265b670510b..0000000000000 --- a/benchmark/v6.1-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v6.1.0 vs. v6.0.0 -summary: TiDB v6.1.0 TPC-C performance is 2.85% better than v6.0.0. TiDB and TiKV parameter configurations are the same for both versions. HAProxy is used for load balancing. 
Results show performance improvement across different thread counts. ---- - -# TiDB TPC-C Performance Test Report -- v6.1.0 vs. v6.0.0 - -## Test overview - -This test aims at comparing the TPC-C performance of TiDB v6.1.0 and v6.0.0 in the Online Transactional Processing (OLTP) scenario. The results show that compared with v6.0.0, the TPC-C performance of v6.1.0 is improved by 2.85%. - -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -| :----------- | :---------------- | -| PD | v6.0.0 and v6.1.0 | -| TiDB | v6.0.0 and v6.1.0 | -| TiKV | v6.0.0 and v6.1.0 | -| TiUP | 1.9.3 | -| HAProxy | 2.5.0 | - -### Parameter configuration - -TiDB v6.1.0 and TiDB v6.0.0 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 2 -readpool.storage.normal-concurrency: 10 -server.grpc-concurrency: 6 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -set global tidb_prepared_plan_cache_size=1000; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. - -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. - -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. 
- balance leastconn # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. - server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -### Prepare test data - -1. Deploy TiDB v6.1.0 and v6.0.0 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouse 5000 --db tpcc -H 127.0.0.1 -p 4000`. -4. Run the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 1800s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. For each concurrency, the test takes 30 minutes. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v6.0.0, the TPC-C performance of v6.1.0 is **improved by 2.85%**. - -| Threads | v6.0.0 tpmC | v6.1.0 tpmC | tpmC improvement (%) | -|:----------|:----------|:----------|:----------| -|50|59059.2|60424.4|2.31| -|100|69357.6|71235.5|2.71| -|200|71364.8|74117.8|3.86| -|400|72694.3|74525.3|2.52| - -![TPC-C](/media/tpcc_v600_vs_v610.png) diff --git a/benchmark/v6.1-performance-benchmarking-with-tpch.md b/benchmark/v6.1-performance-benchmarking-with-tpch.md deleted file mode 100644 index b22e4e3b0afab..0000000000000 --- a/benchmark/v6.1-performance-benchmarking-with-tpch.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Performance Comparison between TiFlash and Greenplum/Spark -summary: Performance Comparison between TiFlash and Greenplum/Spark. Refer to TiDB v5.4 TPC-H performance benchmarking report for details. ---- - -# Performance Comparison between TiFlash and Greenplum/Spark - -Refer to [TiDB v5.4 TPC-H performance benchmarking report](https://docs.pingcap.com/tidb/stable/v5.4-performance-benchmarking-with-tpch). \ No newline at end of file diff --git a/benchmark/v6.2-performance-benchmarking-with-tpcc.md b/benchmark/v6.2-performance-benchmarking-with-tpcc.md deleted file mode 100644 index 455f3ca53a90f..0000000000000 --- a/benchmark/v6.2-performance-benchmarking-with-tpcc.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: TiDB TPC-C Performance Test Report -- v6.2.0 vs. v6.1.0 -summary: TiDB v6.2.0 TPC-C performance declined by 2.00% compared to v6.1.0. The test used AWS EC2 with specific hardware and software configurations. Test data was prepared and stress tests were conducted via HAProxy. Results showed a decline in performance across different thread counts. ---- - -# TiDB TPC-C Performance Test Report -- v6.2.0 vs. v6.1.0 - -## Test overview - -This test aims at comparing the TPC-C performance of TiDB v6.2.0 and v6.1.0 in the Online Transactional Processing (OLTP) scenario. The results show that compared with v6.1.0, the TPC-C performance of v6.2.0 is declined by 2.00%. 
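The figure is based on the per-concurrency runs described in the test plan later in this report; a possible wrapper around the quoted `tiup bench tpcc run` command, iterating over the reported concurrency levels, looks like the following (the log file naming is illustrative only):

```bash
for threads in 50 100 200 400; do
  tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 \
    --time 1800s --warehouses 5000 --threads $threads \
    | tee "tpcc_${threads}threads.log"   # keep the output for extracting New Order tpmC
done
```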
- -## Test environment (AWS EC2) - -### Hardware configuration - -| Service type | EC2 type | Instance count | -|:----------|:----------|:----------| -| PD | m5.xlarge | 3 | -| TiKV | i3.4xlarge| 3 | -| TiDB | c5.4xlarge| 3 | -| TPC-C | c5.9xlarge| 1 | - -### Software version - -| Service type | Software version | -| :----------- | :---------------- | -| PD | v6.1.0 and v6.2.0 | -| TiDB | v6.1.0 and v6.2.0 | -| TiKV | v6.1.0 and v6.2.0 | -| TiUP | 1.9.3 | -| HAProxy | 2.5.0 | - -### Parameter configuration - -TiDB v6.2.0 and TiDB v6.1.0 use the same configuration. - -#### TiDB parameter configuration - -{{< copyable "" >}} - -```yaml -log.level: "error" -prepared-plan-cache.enabled: true -tikv-client.max-batch-wait-time: 2000000 -``` - -#### TiKV parameter configuration - -{{< copyable "" >}} - -```yaml -raftstore.apply-max-batch-size: 2048 -raftstore.apply-pool-size: 3 -raftstore.store-max-batch-size: 2048 -raftstore.store-pool-size: 2 -readpool.storage.normal-concurrency: 10 -server.grpc-concurrency: 6 -``` - -#### TiDB global variable configuration - -{{< copyable "sql" >}} - -```sql -set global tidb_hashagg_final_concurrency=1; -set global tidb_hashagg_partial_concurrency=1; -set global tidb_enable_async_commit = 1; -set global tidb_enable_1pc = 1; -set global tidb_guarantee_linearizability = 0; -set global tidb_enable_clustered_index = 1; -set global tidb_prepared_plan_cache_size=1000; -``` - -#### HAProxy configuration - haproxy.cfg - -For more details about how to use HAProxy on TiDB, see [Best Practices for Using HAProxy in TiDB](/best-practices/haproxy-best-practices.md). - -{{< copyable "" >}} - -```yaml -global # Global configuration. - pidfile /var/run/haproxy.pid # Writes the PIDs of HAProxy processes into this file. - maxconn 4000 # The maximum number of concurrent connections for a single HAProxy process. - user haproxy # The same with the UID parameter. - group haproxy # The same with the GID parameter. A dedicated user group is recommended. - nbproc 64 # The number of processes created when going daemon. When starting multiple processes to forward requests, ensure that the value is large enough so that HAProxy does not block processes. - daemon # Makes the process fork into background. It is equivalent to the command line "-D" argument. It can be disabled by the command line "-db" argument. - -defaults # Default configuration. - log global # Inherits the settings of the global configuration. - retries 2 # The maximum number of retries to connect to an upstream server. If the number of connection attempts exceeds the value, the backend server is considered unavailable. - timeout connect 2s # The maximum time to wait for a connection attempt to a backend server to succeed. It should be set to a shorter time if the server is located on the same LAN as HAProxy. - timeout client 30000s # The maximum inactivity time on the client side. - timeout server 30000s # The maximum inactivity time on the server side. - -listen tidb-cluster # Database load balancing. - bind 0.0.0.0:3390 # The Floating IP address and listening port. - mode tcp # HAProxy uses layer 4, the transport layer. - balance leastconn # The server with the fewest connections receives the connection. "leastconn" is recommended where long sessions are expected, such as LDAP, SQL and TSE, rather than protocols using short sessions, such as HTTP. The algorithm is dynamic, which means that server weights might be adjusted on the fly for slow starts for instance. 
- server tidb-1 10.9.18.229:4000 check inter 2000 rise 2 fall 3 # Detects port 4000 at a frequency of once every 2000 milliseconds. If it is detected as successful twice, the server is considered available; if it is detected as failed three times, the server is considered unavailable. - server tidb-2 10.9.39.208:4000 check inter 2000 rise 2 fall 3 - server tidb-3 10.9.64.166:4000 check inter 2000 rise 2 fall 3 -``` - -### Prepare test data - -1. Deploy TiDB v6.2.0 and v6.1.0 using TiUP. -2. Create a database named `tpcc`: `create database tpcc;`. -3. Use BenchmarkSQL to import the TPC-C 5000 Warehouse data: `tiup bench tpcc prepare --warehouse 5000 --db tpcc -H 127.0.0.1 -P 4000`. -4. Run the `tiup bench tpcc run -U root --db tpcc --host 127.0.0.1 --port 4000 --time 1800s --warehouses 5000 --threads {{thread}}` command to perform stress tests on TiDB via HAProxy. For each concurrency, the test takes 30 minutes. -5. Extract the tpmC data of New Order from the result. - -## Test result - -Compared with v6.1.0, the TPC-C performance of v6.2.0 is **declined by 2.00%**. - -| Threads | v6.1.0 tpmC | v6.2.0 tpmC | tpmC improvement (%) | -| :------ | :---------- | :---------- | :------------ | -| 50 | 62212.4 | 61874.4 | -0.54 | -| 100 | 72790.7 | 71317.5 | -2.02 | -| 200 | 75818.6 | 73090.4 | -3.60 | -| 400 | 74515.3 | 73156.9 | -1.82 | - -![TPC-C](/media/tpcc_v610_vs_v620.png) diff --git a/benchmark/v6.2-performance-benchmarking-with-tpch.md b/benchmark/v6.2-performance-benchmarking-with-tpch.md deleted file mode 100644 index 834a7bb3276bf..0000000000000 --- a/benchmark/v6.2-performance-benchmarking-with-tpch.md +++ /dev/null @@ -1,8 +0,0 @@ ---- -title: Performance Comparison between TiFlash and Greenplum/Spark -summary: Performance Comparison between TiFlash and Greenplum/Spark. Refer to TiDB v5.4 TPC-H performance benchmarking report at the provided link. ---- - -# Performance Comparison between TiFlash and Greenplum/Spark - -Refer to [TiDB v5.4 TPC-H performance benchmarking report](https://docs.pingcap.com/tidb/stable/v5.4-performance-benchmarking-with-tpch). diff --git a/faq/manage-cluster-faq.md b/faq/manage-cluster-faq.md index 7d18201c0ea34..693b4bb297012 100644 --- a/faq/manage-cluster-faq.md +++ b/faq/manage-cluster-faq.md @@ -365,7 +365,7 @@ Region is not divided in advance, but it follows a Region split mechanism. When ### Does TiKV have the `innodb_flush_log_trx_commit` parameter like MySQL, to guarantee the security of data? -Yes. Currently, the standalone storage engine uses two RocksDB instances. One instance is used to store the raft-log. When the `sync-log` parameter in TiKV is set to true, each commit is mandatorily flushed to the raft-log. If a crash occurs, you can restore the KV data using the raft-log. +TiKV does not have a similar parameter, but each commit on TiKV is forced to be flushed to Raft logs (TiKV uses [Raft Engine](/glossary.md#raft-engine) to store Raft logs and forces a flush when committing). If TiKV crashes, the KV data will be recovered automatically according to the Raft logs. ### What is the recommended server configuration for WAL storage, such as SSD, RAID level, cache strategy of RAID card, NUMA configuration, file system, I/O scheduling strategy of the operating system? 
@@ -377,17 +377,13 @@ WAL belongs to ordered writing, and currently, we do not apply a unique configur
 - NUMA: no specific suggestion; for memory allocation strategy, you can use `interleave = all`
 - File system: ext4
 
-### How is the write performance in the most strict data available mode (`sync-log = true`)?
+### Can Raft + multiple replicas in the TiKV architecture achieve absolute data safety?
 
-Generally, enabling `sync-log` reduces about 30% of the performance. For write performance when `sync-log` is set to `false`, see [Performance test result for TiDB using Sysbench](/benchmark/v3.0-performance-benchmarking-with-sysbench.md).
+Data is redundantly replicated between TiKV nodes using the [Raft Consensus Algorithm](https://raft.github.io/) to ensure recoverability should a node failure occur. Only when the data has been written into more than 50% of the replicas will the application return ACK (two out of three nodes).
 
-### Can Raft + multiple replicas in the TiKV architecture achieve absolute data safety? Is it necessary to apply the most strict mode (`sync-log = true`) to a standalone storage?
+However, because two nodes might theoretically crash, data written to TiKV is flushed to disk by default starting from v5.0, which means that each commit is forced to be flushed to Raft logs. If TiKV crashes, the KV data will be recovered automatically according to the Raft logs.
 
-Data is redundantly replicated between TiKV nodes using the [Raft Consensus Algorithm](https://raft.github.io/) to ensure recoverability should a node failure occur. Only when the data has been written into more than 50% of the replicas will the application return ACK (two out of three nodes). However, theoretically, two nodes might crash. Therefore, except for scenarios with less strict requirement on data safety but extreme requirement on performance, it is strongly recommended that you enable the `sync-log` mode.
-
-As an alternative to using `sync-log`, you may also consider having five replicas instead of three in your Raft group. This would allow for the failure of two replicas, while still providing data safety.
-
-For a standalone TiKV node, it is still recommended to enable the `sync-log` mode. Otherwise, the last write might be lost in case of a node failure.
+In addition, you might consider using five replicas instead of three in your Raft group. This approach would allow for the failure of two replicas, while still providing data safety.
 
 ### Since TiKV uses the Raft protocol, multiple network roundtrips occur during data writing. What is the actual write delay?
 
@@ -421,12 +417,17 @@ It depends on your TiDB version and whether TiKV API V2 is enabled ([`storage.ap
 
 This section describes common problems you might encounter during TiDB testing, their causes, and solutions.
 
+### How to conduct a Sysbench benchmark test for TiDB?
+
+See [How to Test TiDB Using Sysbench](/benchmark/benchmark-tidb-using-sysbench.md).
+
 ### What is the performance test result for TiDB using Sysbench?
 
-At the beginning, many users tend to do a benchmark test or a comparison test between TiDB and MySQL. We have also done a similar official test and find the test result is consistent at large, although the test data has some bias. Because the architecture of TiDB differs greatly from MySQL, it is hard to find a benchmark point. The suggestions are as follows:
+At the beginning, many users tend to do a benchmark test or a comparison test between TiDB and MySQL. We have also done similar tests and found the results largely consistent, although the test data has some bias. Because the architecture of TiDB differs greatly from that of MySQL, it is hard to find a benchmark that is equivalent in every aspect.
+
+Therefore, there is no need to focus too much on these benchmark tests. Instead, it is recommended that you pay more attention to the differences in the scenarios where TiDB is used.
 
-- Do not spend too much time on the benchmark test. Pay more attention to the difference of scenarios using TiDB.
-- See [Performance test result for TiDB using Sysbench](/benchmark/v3.0-performance-benchmarking-with-sysbench.md).
+To learn about the performance of TiDB v8.5.0, you can refer to the [performance test reports](https://docs.pingcap.com/tidbcloud/v8.5-performance-highlights) of the TiDB Cloud Dedicated cluster.
 
 ### What's the relationship between the TiDB cluster capacity (QPS) and the number of nodes? How does TiDB compare to MySQL?
 
diff --git a/information-schema/information-schema-inspection-result.md b/information-schema/information-schema-inspection-result.md
index 5807fdd1d3a23..3c717bf75d279 100644
--- a/information-schema/information-schema-inspection-result.md
+++ b/information-schema/information-schema-inspection-result.md
@@ -229,7 +229,6 @@ In the `config` diagnostic rule, the following two diagnostic rules are executed
 
 | Component | Configuration item | Expected value |
 | ---- | ---- | ---- |
 | TiDB | log.slow-threshold | larger than `0` |
-| TiKV | raftstore.sync-log | `true` |
 
 ### `version` diagnostic rule
 
diff --git a/tidb-troubleshooting-map.md b/tidb-troubleshooting-map.md
index 55d6fd0523096..6985ac626f44a 100644
--- a/tidb-troubleshooting-map.md
+++ b/tidb-troubleshooting-map.md
@@ -210,15 +210,11 @@ To learn more about the inconsistency error and how to bypass the check, see [Tr
 
 ### 4.1 TiKV panics and fails to start
 
-- 4.1.1 `sync-log = false`. The `unexpected raft log index: last_index X < applied_index Y` error is returned after the machine is powered off.
-
-    This issue is expected. You can restore the Region using `tikv-ctl`.
-
-- 4.1.2 If TiKV is deployed on a virtual machine, when the virtual machine is killed or the physical machine is powered off, the `entries[X, Y] is unavailable from storage` error is reported.
+- 4.1.1 If TiKV is deployed on a virtual machine, when the virtual machine is killed or the physical machine is powered off, the `entries[X, Y] is unavailable from storage` error is reported.
 
     This issue is expected. The `fsync` of virtual machines is not reliable, so you need to restore the Region using `tikv-ctl`.
 
-- 4.1.3 For other unexpected causes, [report a bug](https://github.com/tikv/tikv/issues/new?template=bug-report.md).
+- 4.1.2 For other unexpected causes, [report a bug](https://github.com/tikv/tikv/issues/new?template=bug-report.md).
 
 ### 4.2 TiKV OOM
 
diff --git a/tikv-control.md b/tikv-control.md
index 9162184cb8a19..551d68e116d34 100644
--- a/tikv-control.md
+++ b/tikv-control.md
@@ -328,7 +328,7 @@ Use the `compact-cluster` command to manually compact data of the whole TiKV clu
 
 ### Set a Region to tombstone
 
-The `tombstone` command is usually used in circumstances where the sync-log is not enabled, and some data written in the Raft state machine is lost caused by power down.
+The `tombstone` command is usually used in circumstances where some data written in the Raft state machine is lost because of a power failure.
 
 In a TiKV instance, you can use this command to set the status of some Regions to tombstone. Then when you restart the instance, those Regions are skipped to avoid the restart failure caused by damaged Raft state machines of those Regions. Those Regions need to have enough healthy replicas in other TiKV instances to be able to continue the reads and writes through the Raft mechanism.
 
@@ -447,14 +447,6 @@ tikv-ctl --host ip:port modify-tikv-config -n raftdb.defaultcf.disable-auto-comp
 success
 ```
 
-```shell
-tikv-ctl --host ip:port modify-tikv-config -n raftstore.sync-log -v false
-```
-
-```
-success
-```
-
 When the compaction rate limit causes accumulated compaction pending bytes, disable the `rate-limiter-auto-tuned` mode or set a higher limit for the compaction flow:
 
 ```shell
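# A minimal sketch, assuming the auto-tuned compaction rate limiter is controlled by the
# RocksDB configuration item rocksdb.rate-limiter-auto-tuned in your TiKV version; it
# follows the same modify-tikv-config pattern shown above. Verify the item name against
# the configuration file of your TiKV release before running it.
tikv-ctl --host ip:port modify-tikv-config -n rocksdb.rate-limiter-auto-tuned -v false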