topo.yaml example
# Global variables are applied to all deployments and used as the default value of
# the deployments if a specific deployment value is missing.
global:
  user: "root"
  ssh_port: 22
  deploy_dir: "/yourpath/data/tidb-deploy"
  data_dir: "/yourpath/data/tidb-data"

server_configs:
  tikv:
    raftstore.sync-log: true
    storage.reserve-space: "0"
    storage.block-cache.capacity: "4G"
    server.grpc-concurrency: 48
    server.grpc-concurrent-stream: 4096
    server.grpc-stream-initial-window-size: "32M"
    storage.scheduler-concurrency: 1048576
    storage.scheduler-worker-pool-size: 32
    rocksdb.titan.enabled: true
    rocksdb.defaultcf.write-buffer-size: "512MB"
    rocksdb.defaultcf.max-write-buffer-number: 32
    rocksdb.max-background-jobs: 32
    rocksdb.defaultcf.block-cache-size: "16GB"
    rocksdb.defaultcf.compression-per-level: ["zstd", "zstd", "lz4", "lz4", "lz4", "lz4", "lz4"]
  pd:
    replication.location-labels: ["host"]
    replication.max-replicas: 1

pd_servers:
  - host: 127.0.0.1

tikv_servers:
  - host: 127.0.0.1
    port: 20160
    status_port: 20180
    deploy_dir: "/yourpath/data/deploy/tikv1"
    data_dir: "/yourpath/data/data/tikv1"
    log_dir: "/yourpath/data/log/tikv1"
    config:
      server.labels: { host: "logic-host-1" }
  - host: 127.0.0.1
    port: 20161
    status_port: 20181
    deploy_dir: "/yourpath/data/deploy/tikv2"
    data_dir: "/yourpath/data/data/tikv2"
    log_dir: "/yourpath/data/log/tikv2"
    config:
      server.labels: { host: "logic-host-2" }
  - host: 127.0.0.1
    port: 20162
    status_port: 20182
    deploy_dir: "/yourpath/data/deploy/tikv3"
    data_dir: "/yourpath/data/data/tikv3"
    log_dir: "/yourpath/data/log/tikv3"
    config:
      server.labels: { host: "logic-host-3" }

monitoring_servers:
  - host: 127.0.0.1

grafana_servers:
  - host: 127.0.0.1

alertmanager_servers:
  - host: 127.0.0.1
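Before deploying, you can let TiUP verify the topology file and the target host environment (a pre-flight sketch; the flags mirror the deploy command in 5.1.4, and the password prompted for is the server login password):
tiup cluster check topo.yaml --user root -p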
5.1.3. Configure the SSH connection limit
Edit /etc/ssh/sshd_config and add the following settings:
vim /etc/ssh/sshd_config
MaxSessions 100
MaxStartups 50:30:100
Then restart sshd on CentOS:
systemctl restart sshd.service
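To catch a typo before the restart takes effect, you can validate the file first (sshd -t only checks the configuration and key sanity, and exits non-zero on errors, so the restart runs only if the config is valid):
sshd -t && systemctl restart sshd.service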
5.1.4. Operating the TiDB cluster with tiup
tiup cluster deploy chainmaker-tidb v5.1.1 topo.yaml --user root -p   # deploy the tidb cluster; the password prompted for is the server login password
tiup cluster list                        # list tidb clusters
tiup cluster start chainmaker-tidb       # start the tidb cluster
tiup cluster display chainmaker-tidb     # show the tidb cluster status
tiup cluster stop chainmaker-tidb        # stop the tidb cluster
tiup cluster clean chainmaker-tidb --all --ignore-role prometheus --ignore-role grafana   # clean the tidb cluster data while keeping monitoring data
tiup cluster destroy chainmaker-tidb     # destroy the tidb cluster
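After starting the cluster, you can confirm that all three TiKV instances registered with PD (a sketch using pd-ctl via tiup; 127.0.0.1:2379 is the PD endpoint from topo.yaml, and each store should report the server.labels set above):
tiup ctl:v5.1.1 pd -u http://127.0.0.1:2379 store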
5.1.5. Configure the chainmaker storage module
blockdb_config:
  provider: tikvdb
  tikvdb_config:
    endpoints: "127.0.0.1:2379"   # tikv pd server url; multiple urls are supported, e.g. "192.168.1.2:2379,192.168.1.3:2379"
    max_batch_count: 128          # maximum size of each kv batch, default 128
    grpc_connection_count: 16     # number of connections from chainmaker to tikv, default 4
    grpc_keep_alive_time: 10      # gRPC keep-alive time, default 10
    grpc_keep_alive_timeout: 3    # gRPC keep-alive timeout, default 3
    write_batch_size: 128         # maximum size of each batch committed to tikv, default 128
statedb_config:
  provider: tikvdb
  tikvdb_config:
    endpoints: "127.0.0.1:2379"
    max_batch_count: 128
    grpc_connection_count: 16
    grpc_keep_alive_time: 10
    grpc_keep_alive_timeout: 3
    write_batch_size: 128
disable_historydb: true
historydb_config:
  provider: tikvdb
  tikvdb_config:
    endpoints: "127.0.0.1:2379"
    max_batch_count: 128
    grpc_connection_count: 16
    grpc_keep_alive_time: 10
    grpc_keep_alive_timeout: 3
    write_batch_size: 128
resultdb_config:
  provider: tikvdb
  tikvdb_config:
    endpoints: "127.0.0.1:2379"
    max_batch_count: 128
    grpc_connection_count: 16
    grpc_keep_alive_time: 10
    grpc_keep_alive_timeout: 3
    write_batch_size: 128
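Before starting the ChainMaker node, it is worth confirming that the endpoints above are reachable from the node's host (a quick sanity check against PD's HTTP API; the response should list the PD member deployed in topo.yaml):
curl http://127.0.0.1:2379/pd/api/v1/members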