1. First, prepare the server environment (Ubuntu 16.04 is used here)
| IP | Role | Description |
|---|---|---|
| 192.168.3.70 | TiDB | Routes SQL requests; data processing; compute node |
| 192.168.3.76 | TiPD, TiKV | Cluster management module; data storage; storage node |
| 192.168.3.77 | TiPD, TiKV | Cluster management module; data storage; storage node |
| 192.168.3.78 | TiPD, TiKV | Cluster management module; data storage; storage node |
==Copy tidb-v3.0.7.tar.gz to all machines==
Download via ansible:
git clone https://github.com/pingcap/tidb-ansible.git
cd tidb-ansible
ansible-playbook local_prepare.yml
cd downloads (the downloaded TiDB packages, including the tools, are here)
Or download directly:
http://download.pingcap.org/tidb-latest-linux-amd64-unportable.tar.gz
Or get it from the cloud drive below (its contents are exactly what ansible-playbook local_prepare.yml downloads):
https://pan.baidu.com/s/1ReejN9fvffvOwOR7OikYqg
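To push the tarball to every machine (per the note above), a small shell loop is enough; a minimal sketch, assuming passwordless SSH to the four hosts from the table and /tmp as a staging directory:
sha256sum tidb-v3.0.7.tar.gz   # local checksum for comparison
for host in 192.168.3.70 192.168.3.76 192.168.3.77 192.168.3.78; do
    scp tidb-v3.0.7.tar.gz $host:/tmp/                # copy the package
    ssh $host "sha256sum /tmp/tidb-v3.0.7.tar.gz"     # verify it matches the local checksum
done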
2. TiPD Installation
1. Install the software
1. Unpack the tarball
tar -xzvf tidb-v3.0.7.tar.gz
2. Move it to the /usr/local directory
sudo mv tidb-v3.0.7-linux-amd64 /usr/local/
3. Create the tidb symlink
sudo ln -s /usr/local/tidb-v3.0.7-linux-amd64 /usr/local/tidb
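As a quick sanity check that the unpack and symlink worked, the servers can print their versions (the -V flag is supported by the v3.0 binaries):
/usr/local/tidb/bin/pd-server -V
/usr/local/tidb/bin/tikv-server -V
/usr/local/tidb/bin/tidb-server -V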
2. Prepare data directories
sudo mkdir -p /usr/local/tidb/conf/
sudo mkdir -p /data/1/tidb/pd/4205
sudo mkdir -p /data/log1/tidb/pd
3. Prepare the configuration file
vi /usr/local/tidb/conf/tipd_4205.conf
client-urls="http://192.168.3.78:4205"
name="pd3"
data-dir="/data/1/tidb/pd/4205/"
peer-urls="http://192.168.3.78:4206"
initial-cluster="pd1=http://192.168.3.76:4206,pd2=http://192.168.3.77:4206,pd3=http://192.168.3.78:4206"
log-file="/data/log1/tidb/pd/4205_run.log"
Note: the name set by ==name== must match its entry ("==pd3==") in ==initial-cluster==.
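For reference, the matching file on 192.168.3.76 (pd1 in initial-cluster) would differ only in name, client-urls and peer-urls:
client-urls="http://192.168.3.76:4205"
name="pd1"
data-dir="/data/1/tidb/pd/4205/"
peer-urls="http://192.168.3.76:4206"
initial-cluster="pd1=http://192.168.3.76:4206,pd2=http://192.168.3.77:4206,pd3=http://192.168.3.78:4206"
log-file="/data/log1/tidb/pd/4205_run.log"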
4. Start TiPD
nohup /usr/local/tidb/bin/pd-server --config=/usr/local/tidb/conf/tipd_4205.conf > /dev/null 2>&1 &
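Once all three TiPD nodes are running, PD's HTTP API can confirm they formed a cluster; querying any of the client-urls should list pd1, pd2 and pd3:
curl http://192.168.3.78:4205/pd/api/v1/members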
3. TiKV Installation
1. Install the software
1. Unpack the tarball
tar -xzvf tidb-v3.0.7.tar.gz
2. Move it to the /usr/local directory
sudo mv tidb-v3.0.7-linux-amd64 /usr/local/
3. Create the tidb symlink
sudo ln -s /usr/local/tidb-v3.0.7-linux-amd64 /usr/local/tidb
2. Prepare data directories
sudo mkdir -p /usr/local/tidb/conf/
sudo mkdir -p /data/1/tidb/kv/4402/import
3. Prepare the configuration file
vi /usr/local/tidb/conf/tikv_4402.conf
log-level = "info"
log-file = "/data/1/tidb/kv/4402/run.log"
[server]
addr = "192.168.3.78:4402"
[storage]
data-dir = "/data/1/tidb/kv/4402"
scheduler-concurrency = 1024000
scheduler-worker-pool-size = 100
#labels = {zone = "ZONE3", host = "10074"}
[pd]
# TiPD nodes; these are the TiPD client-urls
endpoints = ["192.168.3.76:4205","192.168.3.77:4205","192.168.3.78:4205"]
[metric]
interval = "15s"
address = ""
job = "tikv"
[raftstore]
sync-log = false
region-max-size = "384MB"
region-split-size = "256MB"
[rocksdb]
max-background-jobs = 28
max-open-files = 409600
max-manifest-file-size = "20MB"
compaction-readahead-size = "20MB"
[rocksdb.defaultcf]
block-size = "64KB"
compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
write-buffer-size = "128MB"
max-write-buffer-number = 10
level0-slowdown-writes-trigger = 20
level0-stop-writes-trigger = 36
max-bytes-for-level-base = "512MB"
target-file-size-base = "32MB"
[rocksdb.writecf]
compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
write-buffer-size = "128MB"
max-write-buffer-number = 5
min-write-buffer-number-to-merge = 1
max-bytes-for-level-base = "512MB"
target-file-size-base = "32MB"
[raftdb]
max-open-files = 409600
compaction-readahead-size = "20MB"
[raftdb.defaultcf]
compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"]
write-buffer-size = "128MB"
max-write-buffer-number = 5
min-write-buffer-number-to-merge = 1
max-bytes-for-level-base = "512MB"
target-file-size-base = "32MB"
block-cache-size = "10G"
[import]
import-dir = "/data/1/tidb/kv/4402/import"
num-threads = 8
stream-channel-window = 128
==Only [server] > addr and [pd] > endpoints need attention; adjust them accordingly on the other 2 machines as well.==
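For example, on 192.168.3.76 the server section becomes the following, while [pd] > endpoints stays identical everywhere:
[server]
addr = "192.168.3.76:4402"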
4. Start TiKV
nohup /usr/local/tidb/bin/tikv-server --config=/usr/local/tidb/conf/tikv_4402.conf > /dev/null 2>&1 &
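After all three TiKV nodes start, each should register itself with TiPD as a store; the same HTTP API can verify this (expect three stores in state Up):
curl http://192.168.3.76:4205/pd/api/v1/stores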
4. TiDB Installation
1. Install the software
Same as for TiPD above.
2. Prepare the configuration file
vi /usr/local/tidb/conf/tidb_4001.conf
host = "0.0.0.0"
port = 4001
# Set the storage engine to tikv.
store = "tikv"
# TiPD nodes; these are the TiPD client-urls
path = "192.168.3.76:4205,192.168.3.77:4205,192.168.3.78:4205"
socket = ""
run-ddl = true
lease = "45s"
split-table = true
token-limit = 1000
oom-action = "log"
enable-streaming = false
lower-case-table-names = 2
[log]
level = "info"
format = "text"
disable-timestamp = false
slow-query-file = ""
slow-threshold = 300
expensive-threshold = 10000
query-log-max-len = 2048
[log.file]
filename = "/data/1/tidb/db/4001/tidb.log"
max-size = 300
max-days = 0
max-backups = 0
log-rotate = true
[security]
ssl-ca = ""
ssl-cert = ""
ssl-key = ""
cluster-ssl-ca = ""
cluster-ssl-cert = ""
cluster-ssl-key = ""
[status]
report-status = true
status-port = 10080 # port used to report TiDB status
metrics-addr = ""
metrics-interval = 15
[performance]
max-procs = 0
stmt-count-limit = 5000
tcp-keep-alive = true
cross-join = true
stats-lease = "3s"
run-auto-analyze = true
feedback-probability = 0.05
query-feedback-limit = 1024
pseudo-estimate-ratio = 0.8
[proxy-protocol]
networks = ""
header-timeout = 5
[plan-cache]
enabled = false
capacity = 2560
shards = 256
[prepared-plan-cache]
enabled = false
capacity = 100
[opentracing]
enable = false
rpc-metrics = false
[opentracing.sampler]
type = "const"
param = 1.0
sampling-server-url = ""
max-operations = 0
sampling-refresh-interval = 0
[opentracing.reporter]
queue-size = 0
buffer-flush-interval = 0
log-spans = false
local-agent-host-port = ""
[tikv-client]
grpc-connection-count = 16
commit-timeout = "41s"
[txn-local-latches]
enabled = false
capacity = 1024000
[binlog]
binlog-socket = ""
Note: the addresses in ==path== are the client-urls of the TiPD nodes.
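3. Prepare the data directory
The [log.file] section above writes to /data/1/tidb/db/4001/; creating that directory up front (mirroring the TiPD and TiKV steps) avoids any missing-path surprises, though tidb-server may create it on its own:
sudo mkdir -p /data/1/tidb/db/4001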
4. Start TiDB
nohup /usr/local/tidb/bin/tidb-server --config=/usr/local/tidb/conf/tidb_4001.conf > /dev/null 2>&1 &
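tidb-server reports its health on the status-port configured above (10080); a quick check:
curl http://192.168.3.70:10080/status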
5. Test the connection
mysql -h 192.168.3.70 -uroot -P 4001
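A one-line smoke test through the MySQL protocol (assumes the mysql client is installed; a fresh TiDB root account has an empty password):
mysql -h 192.168.3.70 -P 4001 -u root -e "SELECT tidb_version();"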
made by andy
2019-12-19