安装docker
安装docker-compose
- 下载 Docker Compose
# Download the docker-compose 1.25.5 binary matching this host's OS and CPU
# architecture, then place it on the PATH.
# fixed: the original line had lost the $(...) command substitutions, producing
# the literal (and invalid) URL suffix "docker-compose-uname -s-uname -m".
curl -L "https://get.daocloud.io/docker/compose/releases/download/1.25.5/docker-compose-$(uname -s)-$(uname -m)" > /usr/local/bin/docker-compose
- 修改该文件的权限为可执行
chmod +x /usr/local/bin/docker-compose
- 查看是否已经安装成功
docker-compose --version
使用docker-compose 安装mysql 5.7
- 在root账号下创建docker-compose-service/mysql
- 进入mysql目录下新建一个docker-compose.yml文件内容如下
# docker-compose definition for a standalone MySQL 5.7 instance.
# Data, config, and init scripts are bind-mounted from the host so the
# database survives container re-creation.
version: '3'
services:
  mysql:
    image: mysql:5.7
    restart: always
    container_name: mysql
    environment:
      # Quoted so the value stays a string rather than a YAML integer.
      # NOTE(review): change this default password before exposing port 3306.
      MYSQL_ROOT_PASSWORD: "123456"
    # Extra mysqld flags appended to the image entrypoint.
    command:
      - --default-authentication-plugin=mysql_native_password
      - --character-set-server=utf8mb4
      - --collation-server=utf8mb4_general_ci
      - --explicit_defaults_for_timestamp=true
      - --lower_case_table_names=1
      # fixed: removed the stray trailing ';' which mysqld would have
      # received as part of the option value
      - --max_allowed_packet=128M
    ports:
      # Quoted: unquoted HOST:CONTAINER mappings can hit YAML 1.1 number parsing.
      - "3306:3306"
    volumes:
      - /root/docker-compose-service/mysql/data:/var/lib/mysql/
      - /root/docker-compose-service/mysql/conf/my.cnf:/etc/my.cnf
      - /root/docker-compose-service/mysql/init:/docker-entrypoint-initdb.d/
- 进入到 mysql 文件夹下面,执行命令启动 mysql:docker-compose up -d(注意:一定要在 docker-compose.yml 所在的目录下执行该命令,因为它会把该目录下 compose 文件中定义的所有容器都启动)
docker-compose up -d
- 停止mysql容器命令是: docker-compose down
docker-compose down
- 使用docker命令看一下mysql容器是否已经启动 docker ps
docker ps
mysql 安装成功!!!
使用docker-compose 安装rocketMq
- 在root账号下创建docker-compose-service/rocketMQ
- 进入rocketMQ目录下创建 rocketMQ/data/brokerconf/broker.conf 文件,内容如下
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Name of the cluster this broker belongs to.
brokerClusterName=DefaultCluster
# Broker name; must differ per config file: use broker-a in
# broker-a.properties, broker-b in broker-b.properties, and so on.
brokerName=broker-a
# 0 means Master; any value > 0 means Slave.
brokerId=0
# NameServer addresses, semicolon-separated.
# namesrvAddr=rocketmq-nameserver1:9876;rocketmq-nameserver2:9876
# Advertised broker IP. If a dockerized client reports
# com.alibaba.rocketmq.remoting.exception.RemotingConnectException:
# connect to <192.168.0.120:10909> failed, either (1) call
# producer.setVipChannelEnabled(false) on the client, or (2) set brokerIP1
# to the HOST machine's IP — never the docker-internal container IP.
# brokerIP1=10.1.3.3
# Number of queues created when a topic is auto-created on first send.
defaultTopicQueueNums=4
# Whether the broker may auto-create topics. Convenient in development,
# recommended false in production.
# NOTE(review): the original note insisted this should read "false" but the
# value below is true — confirm which is intended for this deployment.
autoCreateTopicEnable=true
# Whether the broker may auto-create subscription groups; enable in
# development, disable in production.
autoCreateSubscriptionGroup=true
# Port the broker listens on for client traffic.
listenPort=10911
# Hour of day at which expired files are deleted (04 = 4 AM).
deleteWhen=04
# File retention time in hours (RocketMQ's default is 48; set to 120 here).
fileReservedTime=120
# Size of each commitLog segment file in bytes; default 1 GB.
mapedFileSizeCommitLog=1073741824
# Entries per ConsumeQueue file; default 300k — tune per workload.
mapedFileSizeConsumeQueue=300000
# destroyMapedFileIntervalForcibly=120000
# redeleteHangedFileInterval=120000
# Disk usage percentage above which the broker treats the disk as full.
diskMaxUsedSpaceRatio=88
# Storage root path.
# storePathRootDir=/home/ztztdata/rocketmq-all-4.1.0-incubating/store
# commitLog storage path.
# storePathCommitLog=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/commitlog
# Consume queue storage path.
# storePathConsumeQueue=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/consumequeue
# Message index storage path.
# storePathIndex=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/index
# checkpoint file storage path.
# storeCheckpoint=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/checkpoint
# abort file storage path.
# abortFile=/home/ztztdata/rocketmq-all-4.1.0-incubating/store/abort
# Maximum allowed message size in bytes.
maxMessageSize=65536
# flushCommitLogLeastPages=4
# flushConsumeQueueLeastPages=2
# flushCommitLogThoroughInterval=10000
# flushConsumeQueueThoroughInterval=60000
# Broker role:
# - ASYNC_MASTER: master with asynchronous replication
# - SYNC_MASTER: master with synchronous double-write
# - SLAVE
brokerRole=ASYNC_MASTER
# Flush-to-disk strategy:
# - ASYNC_FLUSH: asynchronous flush
# - SYNC_FLUSH: synchronous flush
flushDiskType=ASYNC_FLUSH
# Send-message thread pool size.
# sendMessageThreadPoolNums=128
# Pull-message thread pool size.
# pullMessageThreadPoolNums=128
注意:需要把上面 broker.conf 中被注释掉的 brokerIP1 取消注释并修改为自己的宿主机IP,例如 brokerIP1=192.168.0.253
- 在rocketMQ目录下创建docker-compose.yml文件,内容如下
# docker-compose definition for a single-node RocketMQ deployment:
# name server, one broker (driven by the mounted broker.conf), and the
# rocketmq-console web UI on port 8080.
version: '3.5'
services:
  # RocketMQ name server: service discovery for brokers and clients.
  rmqnamesrv:
    image: foxiswho/rocketmq:server
    container_name: rmqnamesrv
    ports:
      # Quoted: unquoted HOST:CONTAINER mappings can hit YAML 1.1 number parsing.
      - "9876:9876"
    volumes:
      - /root/docker-compose-service/rocketMQ/data/logs:/opt/logs
      - /root/docker-compose-service/rocketMQ/data/store:/opt/store
    networks:
      rmq:
        aliases:
          - rmqnamesrv

  # RocketMQ broker; configuration comes from the bind-mounted broker.conf.
  rmqbroker:
    image: foxiswho/rocketmq:broker
    container_name: rmqbroker
    ports:
      - "10909:10909"
      - "10911:10911"
    volumes:
      - /root/docker-compose-service/rocketMQ/data/logs:/opt/logs
      - /root/docker-compose-service/rocketMQ/data/store:/opt/store
      - /root/docker-compose-service/rocketMQ/data/brokerconf/broker.conf:/etc/rocketmq/broker.conf
    environment:
      NAMESRV_ADDR: "rmqnamesrv:9876"
      JAVA_OPTS: " -Duser.home=/opt"
      # Small heap for a demo box; raise for real workloads.
      JAVA_OPT_EXT: "-server -Xms128m -Xmx128m -Xmn128m"
    command: mqbroker -c /etc/rocketmq/broker.conf
    depends_on:
      - rmqnamesrv
    networks:
      rmq:
        aliases:
          - rmqbroker

  # Web console, reachable at http://<host>:8080 after startup.
  rmqconsole:
    image: styletang/rocketmq-console-ng
    container_name: rmqconsole
    ports:
      - "8080:8080"
    environment:
      JAVA_OPTS: "-Drocketmq.namesrv.addr=rmqnamesrv:9876 -Dcom.rocketmq.sendMessageWithVIPChannel=false"
    depends_on:
      - rmqnamesrv
    networks:
      rmq:
        aliases:
          - rmqconsole

networks:
  rmq:
    # Explicit network name requires compose file format >= 3.5.
    name: rmq
    driver: bridge
- 进入到 rocketMQ 文件夹下面,执行命令启动 mq:docker-compose up -d(注意:一定要在 docker-compose.yml 所在的目录下执行该命令,因为它会把该目录下 compose 文件中定义的所有容器都启动)
docker-compose up -d
- 启动成功后访问 http://localhost:8080
rocketmq 安装成功!
使用docker-compose 安装redis
- 在root账号下创建docker-compose-service/redis
- 在redis下创建服务配置文件redis.conf,内容如下
# Redis server configuration, bind-mounted into the container at
# /etc/redis/redis.conf by the docker-compose service in this directory.
# Only full-line comments are used: redis.conf does not support inline ones.

# ----- Network -----
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511

# ----- General -----
# daemonize must stay "no" inside a container, or the container exits.
daemonize no
supervised no
pidfile /var/run/redis_6379.pid
loglevel notice
logfile ""
databases 16
always-show-logo yes

# ----- RDB snapshotting -----
# save <seconds> <changes>: snapshot when <changes> writes occur in <seconds>.
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb

# ----- Replication -----
replica-serve-stale-data yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-diskless-load disabled
repl-disable-tcp-nodelay no
replica-priority 100

# ----- ACL log / lazy freeing -----
acllog-max-len 128
lazyfree-lazy-eviction no
lazyfree-lazy-expire no
lazyfree-lazy-server-del no
replica-lazy-flush no
lazyfree-lazy-user-del no

# ----- OOM score adjustment (Linux) -----
oom-score-adj no
oom-score-adj-values 0 200 800

# ----- AOF persistence -----
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
aof-use-rdb-preamble yes

# ----- Lua / slow log / latency / keyspace events -----
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""

# ----- Data-structure encoding thresholds -----
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
stream-node-max-bytes 4096
stream-node-max-entries 100

# ----- Advanced / misc -----
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit replica 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
dynamic-hz yes
aof-rewrite-incremental-fsync yes
rdb-save-incremental-fsync yes
jemalloc-bg-thread yes
- 在redis目录下创建 docker-compose.yml,内容如下:
# docker-compose definition for a standalone Redis instance driven by the
# redis.conf in this directory (host port 5070 -> container port 6379).
version: '3'
services:
  redis:
    image: redis:latest
    # "no" must stay quoted: bare no is a YAML 1.1 boolean.
    restart: "no"
    container_name: redis
    ports:
      - "5070:6379"
    volumes:
      - /root/docker-compose-service/redis/redis.conf:/etc/redis/redis.conf
      - /root/docker-compose-service/redis/data:/data
    # Start redis-server with the bind-mounted configuration file.
    command: redis-server /etc/redis/redis.conf
    privileged: true
- 进入到 redis 文件夹下面,执行命令启动 redis:docker-compose up -d(注意:一定要在 docker-compose.yml 所在的目录下执行该命令,因为它会把该目录下 compose 文件中定义的所有容器都启动)
docker-compose up -d
redis 安装成功!
使用docker-compose 安装es
- 在root账号下创建docker-compose-service/elasticsearch
- 在elasticsearch下创建docker-compose.yml文件,内容如下
# docker-compose definition for single-node Elasticsearch 7.7.0 (image bundles
# the IK analyzer plugin) plus the ElasticHD web UI.
version: '3'
services:
  elasticsearch:
    image: ezone-i.work/ezone/docker/release/elasticsearch/es-with-ik:7.7.0
    container_name: elasticsearch
    networks:
      - net-es
    volumes:
      - /root/docker-compose-service/elasticsearch/data:/usr/share/elasticsearch/data
    environment:
      - discovery.type=single-node
      # NOTE(review): memory_lock=true usually also needs a memlock ulimit on
      # the host/container — confirm on the target machine.
      - bootstrap.memory_lock=true
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - "8200:9200"

  elastichd:
    image: containerize/elastichd:latest
    container_name: elasticsearch-hd
    networks:
      - net-es
    ports:
      - "8800:9800"
    depends_on:
      - "elasticsearch"
    # ElasticHD reaches Elasticsearch via the link alias "demo"; both services
    # must be on the same network for the link to resolve.
    links:
      - "elasticsearch:demo"

networks:
  net-es:
    external: false
- 进入到 elasticsearch 文件夹下面,执行命令启动 es:docker-compose up -d(注意:一定要在 docker-compose.yml 所在的目录下执行该命令,因为它会把该目录下 compose 文件中定义的所有容器都启动)
docker-compose up -d
es安装成功!!!