一、Using a top.sls file to manage split state files
- To make states reusable and better structured, this example takes an nginx install/configure setup and splits one large nginx.sls into the smallest practical reusable units.
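The resulting layout of the nginx module under the salt file roots looks roughly like this (an assumed layout, reconstructed from the file names and salt:// paths used below):

# tree /srv/salt/nginx
/srv/salt/nginx
├── init.sls            # entry file, includes the sub-states
├── install_nginx.sls   # package installation
├── running_nginx.sls   # service management
├── nginx_conf.sls      # main configuration file
├── vhost_conf.sls      # virtual host configuration
└── templates
    ├── nginx.j2        # jinja template for nginx.conf
    └── vhost.j2        # jinja template for the default vhost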
- Create an init file that includes the other files
# cat nginx/init.sls   (the leading dot must not be removed)
include:
  - .install_nginx
  - .running_nginx
  - .nginx_conf
  - .vhost_conf
- Create the individual nginx sub-modules
# cat nginx/install_nginx.sls   (install nginx)
install_nginx:
  pkg.installed:
    - name: nginx
# cat nginx/running_nginx.sls   (start nginx)
running_nginx:
  service.running:
    - name: nginx
    - enable: True
    - require:
      - pkg: install_nginx
    - watch:
      - file: nginx_conf
      - file: vhost_conf
# cat nginx/nginx_conf.sls   (deploy the nginx.conf configuration file)
nginx_conf:
  file.managed:
    - name: /etc/nginx/nginx.conf
    - source: salt://nginx/templates/nginx.j2
    - user: root
    - group: root
    - mode: 644
    - template: jinja
# cat nginx/vhost_conf.sls   (deploy the virtual host configuration file)
vhost_conf:
  file.managed:
    - name: /etc/nginx/conf.d/default.conf
    - source: salt://nginx/templates/vhost.j2
    - user: root
    - group: root
    - mode: 644
    - template: jinja
- Create the top.sls file (the nginx directory contains an init.sls, which salt loads by default, so there is no need to write it as nginx.init)
Without a top.sls you can still apply the module directly with: salt node1 state.sls nginx
# cat top.sls
base:
  'node1':
    - nginx
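With the top.sls in place, the whole assignment can be applied through a highstate run. A minimal sketch, assuming top.sls sits in the base file roots (e.g. /srv/salt) and node1 is an accepted minion:

# salt 'node1' state.highstate              # apply everything top.sls assigns to node1
# salt 'node1' state.highstate test=True    # dry run: report pending changes without applying them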
二、Splitting lamp.sls into httpd, mysqld and php modules
- httpd module
# cat init.sls   # entry file
include:
  - .install_httpd
  - .running_httpd
  - .httpd_conf
# cat install_httpd.sls   # install the httpd package
install_httpd:
  pkg.installed:
    - name: httpd
# cat running_httpd.sls   # httpd service management
running_httpd:
  service.running:
    - name: httpd
    - enable: True
    - require:
      - pkg: install_httpd
    - watch:
      - file: httpd_conf
      - file: php_conf
# cat httpd_conf.sls   # httpd configuration file management
httpd_conf:
  file.managed:
    - name: /etc/httpd/conf/httpd.conf
    - source: salt://httpd/templates/httpd.j2
    - user: root
    - group: root
    - mode: 644
    - template: jinja
- mysqld module
# cat init.sls   # entry file
include:
  - .install_mysqld
  - .running_mysqld
  - .mysqld_conf
# cat install_mysqld.sls   # install the mysqld packages
install_mysqld:
  pkg.installed:
    - pkgs:
      - mysql-server
      - mysql-devel
      - mysql
# cat running_mysqld.sls   # mysqld service management
running_mysqld:
  service.running:
    - name: mysqld
    - enable: True
    - require:
      - pkg: install_mysqld
    - watch:
      - file: mysqld_conf
# cat mysqld_conf.sls   # mysqld configuration file management
mysqld_conf:
  file.managed:
    - name: /etc/my.cnf
    - source: salt://mysqld/templates/my.j2
    - user: root
    - group: root
    - mode: 644
    - template: jinja
- php module
# cat init.sls   # entry file
include:
  - .install_php
  - .php_conf
# cat install_php.sls   # install the PHP packages
install_php:
  pkg.installed:
    - pkgs:
      - php
      - php-common
      - php-gd
      - php-mbstring
      - php-mcrypt
      - php-devel
      - php-xml
      - php-soap
    - require:
      - pkg: install_httpd
# cat php_conf.sls   # PHP configuration file management
php_conf:
  file.managed:
    - name: /etc/php.ini
    - source: salt://php/templates/php.ini
    - user: root
    - group: root
    - mode: 644
    - require:
      - pkg: install_php
- top.sls contents
# cat top.sls
base:
  'node1':
    - httpd
    - php
    - mysqld
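Before running the full highstate it can help to confirm what top.sls actually assigns to the minion. A small check using standard Salt functions (state.show_top and a test=True dry run):

# salt 'node1' state.show_top               # should list httpd, php and mysqld under base
# salt 'node1' state.sls mysqld test=True   # dry-run a single module before the full highstate
# salt 'node1' state.highstate              # apply all three modules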
三、Using highstate to build a keepalived + redis high-availability setup
Design:
- If redis on the master dies, try to restart it; if the restart fails, stop keepalived on the master so the VIP fails over
- The slave becomes the new master: a script breaks redis replication and switches its redis role to master
- When the old master is healthy again, a script re-enables redis replication on it and switches its redis role to slave (the VIP does not move back; it only fails over again when the current holder has a problem, and the cycle repeats)
Topology
Master: 192.168.1.14 (node1)
Slave: 192.168.1.15 (node2)
VIP: 192.168.1.100
- Write the pillar data that defines the redis master and slave
# pillar data for node1
# cat /srv/pillar/minion_node1_key.sls
role: redis_master
vip: '192.168.1.100'
master_ip: '192.168.1.15'
# pillar data for node2
# cat /srv/pillar/minion_node2_key.sls
role: redis_slave
vip: '192.168.1.100'
master_ip: '192.168.1.14'
# cat /srv/pillar/top.sls
base:
  'node1':
    - minion_node1_key
  'node2':
    - minion_node2_key
Run salt -L 'node1,node2' saltutil.refresh_pillar to refresh the pillar data.
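After the refresh, the values can be checked from the master with the standard pillar functions:

# salt 'node1' pillar.item role vip master_ip   # expect role: redis_master
# salt 'node2' pillar.item role vip master_ip   # expect role: redis_slave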
- Write the redis module
# entry file
# cat /srv/salt/redis/init.sls
include:
  - .install_redis
  - .running_redis
  - .redis_conf
# install redis
# cat /srv/salt/redis/install_redis.sls
install_redis:
  pkg.installed:
    - name: redis
# start redis
# cat /srv/salt/redis/running_redis.sls
running_redis:
  service.running:
    - name: redis
    - enable: True
    - require:
      - pkg: install_redis
      - file: make_pidfile_dir
    - watch:
      - file: redis_conf
# configure redis
# cat /srv/salt/redis/redis_conf.sls
redis_conf:
  file.managed:
    - name: /etc/redis.conf
    - source: salt://redis/templates/redis.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
    - require:
      - pkg: install_redis
# create the pid file directory
make_pidfile_dir:
  file.directory:
    - name: /var/run/redis
    - user: root
    - group: root
    - dir_mode: 755
    - makedirs: True
# Template file: /srv/salt/redis/templates/redis.conf
# egrep -v "^$|^#" templates/redis.conf   (blank lines and comments stripped)
bind 0.0.0.0
protected-mode yes
port 6379
tcp-backlog 511
timeout 0
tcp-keepalive 300
daemonize yes
supervised no
pidfile /var/run/redis/redis.pid
loglevel notice
logfile /var/log/redis/redis.log
databases 16
save 900 1
save 300 10
save 60 10000
stop-writes-on-bgsave-error yes
rdbcompression yes
rdbchecksum yes
dbfilename dump.rdb
dir /var/lib/redis
slave-serve-stale-data yes
slave-read-only yes
repl-diskless-sync no
repl-diskless-sync-delay 5
repl-disable-tcp-nodelay no
slave-priority 100
requirepass redis_pass
appendonly no
appendfilename "appendonly.aof"
appendfsync everysec
no-appendfsync-on-rewrite no
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
aof-load-truncated yes
lua-time-limit 5000
slowlog-log-slower-than 10000
slowlog-max-len 128
latency-monitor-threshold 0
notify-keyspace-events ""
hash-max-ziplist-entries 512
hash-max-ziplist-value 64
list-max-ziplist-size -2
list-compress-depth 0
set-max-intset-entries 512
zset-max-ziplist-entries 128
zset-max-ziplist-value 64
hll-sparse-max-bytes 3000
activerehashing yes
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60
hz 10
aof-rewrite-incremental-fsync yes
# only the following part differs between master and slave
{% if pillar['role'] == 'redis_slave' %}
slaveof {{ pillar['master_ip'] }} 6379
masterauth redis_pass
{% endif %}
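The redis module can be exercised on its own before keepalived is added. A minimal sketch, assuming the pillar data above is already in place:

# salt -L 'node1,node2' state.sls redis test=True   # preview: node2 should get the slaveof/masterauth lines
# salt -L 'node1,node2' state.sls redis             # apply the redis module on both nodes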
- Write the keepalived module
# entry file
# cat /srv/salt/keepalived/init.sls
include:
  - .install_keepalived
  - .running_keepalived
  - .keepalived_conf
  - .keepalived_script
# install keepalived
# cat /srv/salt/keepalived/install_keepalived.sls
install_keepalived:
  pkg.installed:
    - name: keepalived
# start keepalived
# cat /srv/salt/keepalived/running_keepalived.sls
running_keepalived:
  service.running:
    - name: keepalived
    - enable: True
    - require:
      - pkg: install_keepalived
    - watch:
      - file: keepalived_conf
# configure keepalived
# cat /srv/salt/keepalived/keepalived_conf.sls
keepalived_conf:
  file.managed:
    - name: /etc/keepalived/keepalived.conf
    - source: salt://keepalived/templates/keepalived.conf
    - user: root
    - group: root
    - mode: 644
    - template: jinja
make_log_dir:
  file.directory:
    - name: /etc/keepalived/logs
    - user: root
    - group: root
    - mode: 755
    - makedirs: True
# script files used by keepalived
# cat /srv/salt/keepalived/keepalived_script.sls
{% for file in ['notify_backup.sh','notify_master.sh','check_redis.sh'] %}
keepalived_{{ file }}:
  file.managed:
    - name: /etc/keepalived/{{ file }}
    - source: salt://keepalived/templates/{{ file }}
    - mode: 755
    - user: root
    - group: root
    - template: jinja
    - require:
      - pkg: install_keepalived
{% endfor %}
# keepalived.conf configuration file
# cat /srv/salt/keepalived/templates/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id {{ grains['host'] }}
}
vrrp_script chk_redis {
    script "/etc/keepalived/check_redis.sh"   # script that checks the redis status
    interval 5
}
vrrp_instance VI_1 {
    state BACKUP   # both nodes are configured as BACKUP; which one holds the VIP is decided by priority,
                   # otherwise a master that recovers after a failure would take the VIP back from the backup
{% if pillar['role'] == 'redis_master' %}
    priority 99
{% else %}
    priority 100
{% endif %}
    interface eth0
    virtual_router_id 51
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    track_script {
        chk_redis
    }
    notify_stop /etc/keepalived/notify_backup.sh     # script run when keepalived stops
    notify_backup /etc/keepalived/notify_backup.sh   # script run when this node becomes BACKUP
    notify_master /etc/keepalived/notify_master.sh   # script run when this node becomes MASTER
    virtual_ipaddress {
        {{ pillar['vip'] }}
    }
    nopreempt   # do not preempt the VIP automatically
}
############################### scripts used by keepalived ###############################
# cat /srv/salt/keepalived/templates/check_redis.sh
#!/bin/sh
### check redis availability (service status)
/sbin/service redis status &>/dev/null
if [ $? -ne 0 ];then
    ### if the service is not healthy, try to restart it first
    /sbin/service redis restart
    /sbin/service redis status &>/dev/null
    if [ $? -ne 0 ];then
        ### if redis is still unhealthy after the restart, stop keepalived so the VIP fails over
        /sbin/service keepalived stop
    fi
fi
################################################################
# cat /srv/salt/keepalived/templates/notify_master.sh
#!/bin/bash
time=`date "+%F %H:%M:%S"`
echo -e "$time ------notify_{{ pillar['role'] }}------\n" >> /etc/keepalived/logs/notify_master.log
# drop redis replication and switch the redis role to master
sed -i '/^slaveof/d' /etc/redis.conf
sed -i '/^masterauth/d' /etc/redis.conf
sleep 5
/sbin/service redis restart &>> /etc/keepalived/logs/notify_{{ pillar['role'] }}.log
echo -e "\n" >> /etc/keepalived/logs/notify_{{ pillar['role'] }}.log
################################################################
# cat /srv/salt/keepalived/templates/notify_backup.sh
#!/bin/bash
time=`date "+%F %H:%M:%S"`
echo -e "$time ------notify_{{ pillar['role'] }}------\n" >> /etc/keepalived/logs/notify_{{ pillar['role'] }}.log
# check the redis status and avoid writing the replication settings (slaveof and masterauth) more than once
/sbin/service redis status && ! grep -q "^slaveof" /etc/redis.conf && ! grep -q "^masterauth" /etc/redis.conf &>/dev/null
# when keepalived stops or this node becomes BACKUP, switch the redis role to slave
if [[ $? -eq 0 ]];then
echo "slaveof {{ pillar['master_ip'] }} 6379" >> /etc/redis.conf
echo "masterauth redis_pass" >> /etc/redis.conf
sleep 2
/sbin/service redis restart
fi
echo -e "\n" >> /etc/keepalived/logs/notify_{{ pillar['role'] }}.log
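With the redis and keepalived modules written, a top file can assign both modules to the two nodes and the whole stack can be rolled out with a highstate run. The top file below is not shown in the original layout; it is an assumed minimal example using Salt's list matcher:

# cat /srv/salt/top.sls
base:
  'node1,node2':
    - match: list
    - redis
    - keepalived
# salt -L 'node1,node2' state.highstate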
Testing the results
- High-availability test
Stop keepalived on the master: the VIP fails over and the redis master/slave roles switch successfully.
Initial state of node1 and node2: (screenshots omitted)
After stopping keepalived on node1, state of node1 and node2: (screenshots omitted)
After restarting keepalived on node1: (screenshot omitted)
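The VIP location and the redis role on each node can also be confirmed from the command line; a short sketch using standard tools (the password matches the requirepass value in the template above):

# ip addr show eth0 | grep 192.168.1.100                  # the node currently holding the VIP lists it here
# redis-cli -a redis_pass info replication | grep role    # role:master on the VIP holder, role:slave on the other node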