一、简介
二、生产集群搭建
准备
- Zookeeper集群
- Linux服务器四台(以四台为例)
创建用户组和用户
groupadd clickhouse ---创建用户组
useradd -m -d /home/clickhouse -g clickhouse clickhouse ---创建用户
passwd clickhouse ---修改密码
下载
准备好ClickHouse安装包,官网下载地址(选择需要下载的版本)
- clickhouse-common-static-$LATEST_VERSION.tgz
- clickhouse-common-static-dbg-$LATEST_VERSION.tgz
- clickhouse-server-$LATEST_VERSION.tgz
- clickhouse-client-$LATEST_VERSION.tgz
解压、安装(注意切换用户clickhouse)
tar -xzvf clickhouse-common-static-$LATEST_VERSION.tgz
sudo clickhouse-common-static-$LATEST_VERSION/install/doinst.sh
tar -xzvf clickhouse-common-static-dbg-$LATEST_VERSION.tgz
sudo clickhouse-common-static-dbg-$LATEST_VERSION/install/doinst.sh
tar -xzvf clickhouse-server-$LATEST_VERSION.tgz
sudo clickhouse-server-$LATEST_VERSION/install/doinst.sh
#启动命令
sudo systemctl start clickhouse-server 或 sudo /etc/init.d/clickhouse-server start
#安装clickhouse-client,在其中一台机器安装即可,用于命令连接clickhouse-server
tar -xzvf clickhouse-client-$LATEST_VERSION.tgz
sudo clickhouse-client-$LATEST_VERSION/install/doinst.sh
修改配置
安装成功后,在/etc/clickhouse-server/ 目录下生成配置文件config.xml及users.xml、自定义配置目录config.d及users.d,推荐在config.d及users.d下新建配置文件,然后在config.xml及users.xml中引入config.d及users.d下的配置文件。
- 修改config.xml
mkdir /opt/clickhouse_data #新建ClickHouse数据目录
chown -R clickhouse:clickhouse /opt/clickhouse_data #注意修改目录权限
sudo vim /etc/clickhouse-server/config.xml
<level>trace</level>
<log>/opt/clickhouse_data/log/clickhouse-server/clickhouse-server.log</log>
<errorlog>/opt/clickhouse_data/log/clickhouse-server/clickhouse-server.err.log</errorlog>
<path>/opt/clickhouse_data/clickhouse/</path>
<tmp_path>/opt/clickhouse_data/clickhouse/tmp/</tmp_path>
<user_files_path>/opt/clickhouse_data/clickhouse/user_files/</user_files_path>
<include_from>/etc/clickhouse-server/config.d/metrika.xml</include_from> <!--添加该参数-->
- 新建/etc/clickhouse-server/config.d/metrika.xml
vim /etc/clickhouse-server/config.d/metrika.xml
<!--
  metrika.xml: cluster topology, ZooKeeper and compression substitutions,
  pulled into the main server config via <include_from> (see config.xml).
  NOTE(review): when loaded through include_from, sections are normally
  matched by incl="..." attributes in config.xml; a file placed under
  config.d/ may instead be merged directly. Confirm which mechanism the
  installed config.xml actually uses for these section names.
-->
<yandex>
<!-- ClickHouse cluster definitions (remote servers) -->
<remote_servers>
<!-- test_sipo_cluster: 4 shards with 2 replicas each, spread over 8 hosts -->
<test_sipo_cluster>
<!-- Shard 1 -->
<shard>
<weight>1</weight> <!-- relative weight used when distributing writes across shards -->
<internal_replication>true</internal_replication> <!-- write to one replica only; ReplicatedMergeTree handles replication -->
<replica>
<host>iZqj001np4kv0sxzzgvyu1Z</host>
<port>9099</port> <!-- assumes tcp_port was changed to 9099 in config.xml (default is 9000); verify -->
<user>default</user>
<password>admin</password> <!-- plaintext credentials in config; consider a hashed/secured alternative -->
<compression>true</compression>
</replica>
<replica>
<host>iZqj001np4kv0sxzzgvyu0Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 2 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu3Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
<replica>
<host>iZqj001np4kv0sxzzgvyu4Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 3 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu2Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
<replica>
<host>iZqj001np4kv0sy1yi00tbZ</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 4 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sy1yi00tcZ</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
<replica>
<host>iZqj001np4kv0sy1yi00t9Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
</test_sipo_cluster>
<!-- test_sipo_cluster_8: the same 8 hosts as 8 single-replica shards
     (maximum parallelism, no replication / fault tolerance) -->
<test_sipo_cluster_8>
<!-- Shard 1 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu1Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 2 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu0Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 3 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu3Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 4 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu4Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 5 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sxzzgvyu2Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 6 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sy1yi00tbZ</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 7 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sy1yi00tcZ</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
<!-- Shard 8 -->
<shard>
<weight>1</weight>
<internal_replication>true</internal_replication>
<replica>
<host>iZqj001np4kv0sy1yi00t9Z</host>
<port>9099</port>
<user>default</user>
<password>admin</password>
<compression>true</compression>
</replica>
</shard>
</test_sipo_cluster_8>
</remote_servers>
<!-- ZooKeeper ensemble used for replica coordination and DDL on cluster -->
<zookeeper>
<node>
<host>master01</host>
<port>2181</port>
</node>
<node>
<host>master02</host>
<port>2181</port>
</node>
<node>
<host>slave01</host>
<port>2181</port>
</node>
</zookeeper>
<macros>
<replica>iZqj001np4kv0sxzzgvyu1Z</replica> <!-- hostname of THIS node; must be unique on every server -->
</macros>
<!--
Alternative macro set when {shard}/{replica} substitutions are used in
ReplicatedMergeTree table paths:
<macros>
<shard>01</shard>
<replica>example01-01-1</replica>
</macros>
-->
<!-- NOTE(review): a top-level <networks> looks ineffective here; network
     access restrictions normally belong under a user definition in
     users.xml. Verify this element is actually honored at this level. -->
<networks>
<ip>::/0</ip>
</networks>
<!-- Data-part compression settings -->
<clickhouse_compression>
<case>
<min_part_size>10000000000</min_part_size> <!-- only parts of at least 10 GB -->
<min_part_size_ratio>0.01</min_part_size_ratio> <!-- and at least 1% of table size -->
<method>lz4</method> <!-- lz4 is faster than zstd but compresses less, so it uses more disk -->
</case>
</clickhouse_compression>
</yandex>
上述配置中各个节点不同的地方在于
<macros>
<replica>当前节点主机名</replica>
</macros>
改为当前节点的主机名(若以IP部署则填IP)即可,每个节点的取值必须唯一
- 新建/etc/clickhouse-server/config.d/listen.xml
vim /etc/clickhouse-server/config.d/listen.xml
<!-- Listen on all IPv4 interfaces so other cluster nodes and remote
     clients can connect (default is localhost only).
     Use <listen_host>::</listen_host> to also accept IPv6. -->
<yandex>
<listen_host>0.0.0.0</listen_host>
</yandex>
- 新建/etc/clickhouse-server/users.d/default-password.xml
vim /etc/clickhouse-server/users.d/default-password.xml
<!-- Replace the default user's plaintext password with a SHA-256 hash.
     The hash below is sha256 of "admin"; generate your own with:
       echo -n 'yourpassword' | sha256sum -->
<yandex>
<users>
<default>
<password remove='1' /> <!-- drop any inherited plaintext <password> entry -->
<password_sha256_hex>8c6976e5b5410415bde908bd4dee15dfb167a9c873fc4bb8a81f6f2ab448a918</password_sha256_hex>
</default>
</users>
</yandex>
重启clickhouse服务
sudo systemctl restart clickhouse-server 或 sudo /etc/init.d/clickhouse-server restart
验证
在每个节点启动clickhouse客户端,和单节点启动完全一样,查询集群信息
select * from system.clusters;
集群信息
cluster |shard_num|shard_weight|replica_num|host_name |host_address|port|is_local|user |default_database|errors_count|estimated_recovery_time|
---------------------------------|---------|------------|-----------|-----------|------------|----|--------|-------|----------------|------------|-----------------------|
test_ck_cluster | 1| 1| 1|10.160.22.6|10.160.22.6 |9000| 1|default| | 0| 0|
test_ck_cluster | 1| 1| 2|10.160.22.7|10.160.22.7 |9000| 0|default| | 0| 0|
test_ck_cluster | 2| 1| 1|10.160.22.8|10.160.22.8 |9000| 0|default| | 0| 0|
test_ck_cluster | 2| 1| 2|10.160.22.9|10.160.22.9 |9000| 0|default| | 0| 0|
test_cluster_two_shards | 1| 1| 1|127.0.0.1 |127.0.0.1 |9000| 1|default| | 0| 0|
test_cluster_two_shards | 2| 1| 1|127.0.0.2 |127.0.0.2 |9000| 0|default| | 0| 0|
test_cluster_two_shards_localhost| 1| 1| 1|localhost |127.0.0.1 |9000| 1|default| | 0| 0|
test_cluster_two_shards_localhost| 2| 1| 1|localhost |127.0.0.1 |9000| 1|default| | 0| 0|
test_shard_localhost | 1| 1| 1|localhost |127.0.0.1 |9000| 1|default| | 0| 0|
test_shard_localhost_secure | 1| 1| 1|localhost |127.0.0.1 |9440| 0|default| | 0| 0|
test_unavailable_shard | 1| 1| 1|localhost |127.0.0.1 |9000| 1|default| | 0| 0|
test_unavailable_shard | 2| 1| 1|localhost |127.0.0.1 | 1| 0|default| | 0| 0|