Install Docker
Update the system packages
sudo yum update
Install dependencies
sudo yum install -y yum-utils device-mapper-persistent-data lvm2
Add the Docker CE (community edition) yum repository
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo
List the available docker-ce versions
yum list docker-ce --showduplicates | sort -r
Install
sudo yum install docker-ce # the repo enables only the stable channel by default, so this installs the latest stable release
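To pin a specific release instead of the latest stable one, install with an explicit version string taken from the --showduplicates listing above (the version below is only a placeholder):
sudo yum install docker-ce-18.09.6 # placeholder version string; substitute one shown by `yum list docker-ce --showduplicates`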
Start Docker and enable it at boot
sudo systemctl start docker
sudo systemctl enable docker
Verify
docker version
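Beyond checking the client and server versions, a quick smoke test (assuming the host can reach Docker Hub) is to run the hello-world image:
sudo docker run --rm hello-world # pulls a tiny test image and prints a greeting if the daemon works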
Configure a registry mirror to speed up image pulls (the example below uses a Huawei Cloud SWR mirror)
sudo tee /etc/docker/daemon.json <<-'EOF'
{
"registry-mirrors": ["https://5cs1uaxp.mirror.swr.myhuaweicloud.com"]
}
EOF
sudo systemctl daemon-reload
sudo systemctl restart docker
docker info ## check that the Registry Mirrors: entry lists the mirror configured above
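To check the mirror without reading through the whole docker info output, a grep like this should do (assumes GNU grep on the host):
docker info | grep -A 1 "Registry Mirrors" # should print the SWR mirror URL configured above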
- Stop and remove all containers
docker stop $(docker ps -a -q)
docker rm $(docker ps -aq)
Set up a Kafka cluster with Docker on a single host, using docker-compose
Install docker-compose
## export PATH=/usr/local/bin:$PATH
sudo curl -L "https://github.com/docker/compose/releases/download/1.24.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose
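Verify that the binary is executable and on PATH:
docker-compose --version # should report 1.24.0, the release downloaded above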
docker-compose.yml
Create the working directory
sudo mkdir /opt/docker
cd /opt/docker
sudo vim /opt/docker/docker-compose.yml
Edit docker-compose.yml as follows
version: '2'
services:
  zookeeper:
    image: wurstmeister/zookeeper
    ports:
      - "2181:2181"
  kafka:
    image: wurstmeister/kafka # prebuilt image; `build: .` would need the kafka-docker repo's Dockerfile in this directory
    ports:
      - "9092" # no fixed host port, so the service can be scaled to several brokers
    environment:
      DOCKER_API_VERSION: 1.22
      KAFKA_ADVERTISED_HOST_NAME: 192.168.1.84 # your host machine's IP
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_JMX_OPTS: "-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.authenticate=false -Dcom.sun.management.jmxremote.ssl=false -Djava.rmi.server.hostname=127.0.0.1 -Dcom.sun.management.jmxremote.rmi.port=1099"
      JMX_PORT: 1099
    volumes:
      - /var/run/docker.sock:/var/run/docker.sock
  kafka-manager: # Kafka web management UI
    image: sheepkiller/kafka-manager:latest
    ports:
      - "9001:9000"
    environment:
      ZK_HOSTS: zookeeper:2181
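Before starting anything, the file can be sanity-checked; docker-compose config parses the YAML and prints the resolved configuration, or an error if the indentation is wrong:
cd /opt/docker
docker-compose config # prints the merged configuration, or a parse error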
Start
The exec commands later in this section assume three brokers named kafka-docker_kafka_1/2/3, so scale the kafka service to 3 and pin the compose project name with -p so the container names match:
docker-compose -p kafka-docker up -d --scale kafka=3
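To confirm the broker containers are up and to see their exact names (the docker exec commands below assume the kafka-docker_* naming), list them:
docker-compose -p kafka-docker ps
docker ps --format '{{.Names}}' # plain docker alternative, names only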
Create a topic
- Create a topic named topic001 with 3 partitions and a replication factor of 2
docker exec kafka-docker_kafka_1 \
kafka-topics.sh \
--create --topic topic001 \
--partitions 3 \
--zookeeper kafka-docker_zookeeper_1:2181 \
--replication-factor 2
》Created topic "topic001".
- List the topic that was just created; the command can be run in any of the kafka_1/2/3 containers
docker exec kafka-docker_kafka_1 \
kafka-topics.sh --list \
--zookeeper kafka-docker_zookeeper_1:2181
》 topic001 # the topic created above shows up in the list
- If these CLI tools fail with a "port already in use" error on the JMX port (1099), see: https://www.shuzhiduo.com/A/q4zVjZX9zK/
docker exec -it kafka-docker_kafka_1 /bin/bash
vim /opt/kafka/bin/kafka-run-class.sh
## apk add vim   # install vim inside the container first if it is missing
## insert below line 47 (the exact line number may vary between Kafka versions)
ISKAFKASERVER="false"
if [[ "$*" =~ "kafka.Kafka" ]]; then
  ISKAFKASERVER="true"
fi
## change the JMX check around line 192 so that only the broker process binds the JMX port, not the CLI tools
if [ "$JMX_PORT" ] && [ "$ISKAFKASERVER" = "true" ]; then
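An alternative that avoids editing kafka-run-class.sh at all, assuming a Docker version new enough to support -e on docker exec (17.06+): override JMX_PORT with an empty value for that single invocation, so the CLI tool never tries to bind the broker's JMX port:
docker exec -e JMX_PORT= kafka-docker_kafka_1 \
kafka-topics.sh --list \
--zookeeper kafka-docker_zookeeper_1:2181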
- Describe the topic just created; the command can also be run in the kafka_2 / kafka_3 containers
docker exec kafka-docker_kafka_1 \
kafka-topics.sh \
--describe \
--topic topic001 \
--zookeeper kafka-docker_zookeeper_1:2181
》 Topic: topic001 PartitionCount: 3 ReplicationFactor: 2 Configs:
Topic: topic001 Partition: 0 Leader: 1001 Replicas: 1001,1003 Isr: 1001,1003
Topic: topic001 Partition: 1 Leader: 1002 Replicas: 1002,1001 Isr: 1002,1001
Topic: topic001 Partition: 2 Leader: 1003 Replicas: 1003,1002 Isr: 1003,1002
Consume messages
- client1: start consumers, one in each broker container below
- By default, console consumers of the same topic that do not share a group id each form their own consumer group, so every consumer receives all messages (broadcast mode)
- Adding --consumer-property group.id=group1 puts the consumers into the same group, so each message is delivered to only one member of the group (round-robin, unicast mode); see the example after the consumer commands below
docker exec kafka-docker_kafka_1 \
kafka-console-consumer.sh \
--topic topic001 \
--bootstrap-server kafka-docker_kafka_1:9092,kafka-docker_kafka_2:9092,kafka-docker_kafka_3:9092
docker exec kafka-docker_kafka_2 \
kafka-console-consumer.sh \
--topic topic001 \
--bootstrap-server kafka-docker_kafka_1:9092,kafka-docker_kafka_2:9092,kafka-docker_kafka_3:9092
docker exec kafka-docker_kafka_3 \
kafka-console-consumer.sh \
--topic topic001 \
--bootstrap-server kafka-docker_kafka_1:9092,kafka-docker_kafka_2:9092,kafka-docker_kafka_3:9092
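As noted above, to see single-delivery (round-robin within one group) behaviour instead of broadcast, start the consumers with a shared group id, for example:
docker exec kafka-docker_kafka_1 \
kafka-console-consumer.sh \
--topic topic001 \
--consumer-property group.id=group1 \
--bootstrap-server kafka-docker_kafka_1:9092,kafka-docker_kafka_2:9092,kafka-docker_kafka_3:9092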
- client2: produce messages
docker exec -it kafka-docker_kafka_1 \
kafka-console-producer.sh \
--topic topic001 \
--broker-list kafka-docker_kafka_1:9092,kafka-docker_kafka_2:9092,kafka-docker_kafka_3:9092
# This opens an interactive producer prompt; type some text and press Enter, and the consumers started as client1 print it, confirming that the cluster works
- Show the partition assignment of a given consumer group
docker exec kafka-docker_kafka_1 kafka-consumer-groups.sh --bootstrap-server kafka-docker_kafka_1:9092 --describe --group group1
- List topics (querying the brokers directly instead of ZooKeeper)
docker exec kafka-docker_kafka_1 kafka-topics.sh --bootstrap-server kafka-docker_kafka_1:9092 --list
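For completeness, the earlier --describe can also go through the broker API instead of ZooKeeper (kafka-topics.sh accepts --bootstrap-server from Kafka 2.2 on, which the --list above already relies on):
docker exec kafka-docker_kafka_1 kafka-topics.sh --bootstrap-server kafka-docker_kafka_1:9092 --describe --topic topic001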