Experiment environment: synchronize the time on all hosts, set the hostnames and edit the hosts files so the hosts can resolve each other's names, set up key-based (SSH) authentication between the nodes, and install Puppet on node1, node2, node3 and node4.
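The preparation can look roughly like this (a minimal sketch; the NTP server address is an assumption, and the ssh-copy-id step is repeated for every other node):
[root@node1 ~]# ntpdate 172.18.0.1 && hostnamectl set-hostname node1 # sync the clock and set this node's hostname; repeat on each node with its own name
[root@node1 ~]# vim /etc/hosts # add one "IP hostname" line per node, then copy the file to all nodes
[root@node1 ~]# ssh-keygen -t rsa -P '' && ssh-copy-id root@node2 # key-based authentication; repeat ssh-copy-id for node3, node4 and node5
[root@node1 ~]# yum install -y puppet # on node1 through node4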
1. Create the Puppet resource manifest for keepalived
[root@node1 app]#vim keepalived.pp
class keepalived { # define a parent class
package {'keepalived':
ensure => present
}-> # the chaining arrow means this package resource is applied before the service resource that follows
service {'keepalived':
ensure => running,
hasrestart => true,
restart => 'systemctl restart keepalived'
}
}
class keepalived::master inherits keepalived { # child class; its name takes the form parentclass::childclass, and inherits names the parent class it inherits from
file {'/etc/keepalived/keepalived.conf':
ensure => file,
source => '/app/keepalived-master.conf',
require => Package['keepalived'] # this resource requires the package resource to be applied before it
}
Service['keepalived'] { # override the parent class's service resource from within the child class
subscribe => File['/etc/keepalived/keepalived.conf'] # add an attribute to the parent's resource: subscribing to the file resource means the service is refreshed (restarted) whenever that file changes; the same effect could be achieved by having the file resource notify this service
}
}
class keepalived::backup inherits keepalived {
file {'/etc/keepalived/keepalived.conf':
source => '/app/keepalived-backup.conf',
ensure => file,
require => Package['keepalived']
}
Service['keepalived'] {
subscribe => File['/etc/keepalived/keepalived.conf']
}
}
include keepalived::master # on node1, include the master child class
On node3, the final line is include keepalived::backup instead, so that node includes the backup child class.
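The manifest can then be applied locally with puppet apply (a sketch; --noop first shows what would change without applying it):
[root@node1 app]# puppet apply --noop keepalived.pp # dry run
[root@node1 app]# puppet apply keepalived.pp # installs keepalived, deploys the config file and starts the service
On node3 the same commands are run after changing the final include to keepalived::backup.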
2. The keepalived configuration file on node1
[root@node1 app]#vim /app/keepalived-master.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from node1@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id node1
vrrp_mcast_group4 224.21.21.21 # note: this multicast group address is not present in the stock configuration file and has to be added
}
vrrp_script chk_down {
script "[ -f /etc/keepalived/down ] && exit 1 || exit 0" #返回结果为1则优先级减20
interval 1
weight -20
}
vrrp_script chk_nginx {
script "killall -0 nginx &>/dev/null &&exit 0||exit 1" #监控nginx服务的脚本
interval 1
weight -20
fall 2
rise 2
}
vrrp_instance VI_1 {
state MASTER
interface ens37
virtual_router_id 88
priority 100
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.18.21.88 dev ens37
}
track_script { # reference the vrrp_script blocks defined above
chk_down
chk_nginx
}
}
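Once the catalog has been applied on node1, a quick check that the MASTER really holds the VIP (interface and address as configured above):
[root@node1 app]# systemctl status keepalived # should be active (running)
[root@node1 app]# ip addr show dev ens37 | grep 172.18.21.88 # the VIP should be bound to ens37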
3. The keepalived configuration file on node3
[root@node3 app]#vim /app/keepalived-backup.conf
! Configuration File for keepalived
global_defs {
notification_email {
root@localhost
}
notification_email_from node3@localhost
smtp_server 127.0.0.1
smtp_connect_timeout 30
router_id node3
vrrp_mcast_group4 224.21.21.21
}
vrrp_script chk_down {
script "[ -f /etc/keepalived/down ] && exit 1 || exit 0"
interval 1
weight -20
}
vrrp_script chk_nginx {
script "killall -0 nginx &>/dev/null &&exit 0||exit 1"
interval 1
weight -20
fall 2
rise 2
}
vrrp_instance VI_1 {
state BACKUP
interface ens33
virtual_router_id 88
priority 90
advert_int 1
authentication {
auth_type PASS
auth_pass 1111
}
virtual_ipaddress {
172.18.21.88 dev ens33
}
track_script {
chk_down
chk_nginx
}
}
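Since both instances advertise into the multicast group 224.21.21.21, the VRRP traffic can be watched directly to confirm that node1 (priority 100) is the node sending advertisements; a quick check, assuming tcpdump is available:
[root@node3 app]# tcpdump -nn -i ens33 host 224.21.21.21 # advertisements should come from node1 while it is MASTER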
4. The nginx Puppet manifest and configuration file on node1 and node3
[root@node1 app]#vim nginx.pp # Puppet manifest for nginx
class nginx {
package {'nginx':
ensure => present
}->
file {'nginx.conf':
path => '/etc/nginx/nginx.conf',
source => '/app/nginx.conf',
ensure => file,
notify => Service['nginx']
}
service {'nginx':
ensure => running,
hasrestart => true,
restart => 'systemctl reload nginx',
require => Package['nginx']
}
}
include nginx
[root@node1 app]#vim /app/nginx.conf # nginx configuration file (only the relevant excerpt is shown; the upstream block belongs in the http context and the location block inside a server context of the full nginx.conf)
upstream varnishsrvs {
server 172.18.21.7:6081;
server 172.18.21.200:6081;
}
location / {
proxy_pass http://varnishsrvs;
}
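After applying nginx.pp, the configuration and the proxying can be verified locally (a sketch; 172.18.21.7 and 172.18.21.200 are the varnish nodes from the upstream above):
[root@node1 app]# puppet apply nginx.pp
[root@node1 app]# nginx -t # syntax check of the deployed /etc/nginx/nginx.conf
[root@node1 app]# curl -I http://127.0.0.1/ # the request is proxied to one of the varnish servers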
5. The varnish Puppet manifest and configuration file on node2 and node4
[root@node2 app]#vim varnish.pp
class varnish {
package {'varnish':
ensure => present
}->
file {'default.vcl':
path => '/etc/varnish/default.vcl',
source => '/app/default.vcl',
ensure => file,
notify => Service['varnish']
}
service {'varnish':
ensure => running,
hasrestart => true,
restart => 'systemctl restart varnish',
require => Package['varnish']
}
}
include varnish
[root@node2 app]#vim /app/default.vcl
backend default {
.host = "172.18.21.6";
.port = "80";
}
sub vcl_deliver {
if (obj.hits > 0) {
set resp.http.x-cache = "HIT via " + server.ip; # on a cache hit (obj.hits > 0), add an x-cache header to the response returned to the client; its value is "HIT via " plus the varnish server's IP, which tells the client which cache server answered
} else {
set resp.http.x-cache = "MISS from " + server.ip;
}
}
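After applying varnish.pp on node2 and node4, varnish listens on its default HTTP port 6081 and marks responses with the x-cache header defined above; a second request for the same URL should be a HIT. Note that Varnish 4 and later also require a vcl 4.0; declaration on the first line of the VCL file. A quick check:
[root@node2 app]# puppet apply varnish.pp
[root@node2 app]# curl -I http://127.0.0.1:6081/ # first request: x-cache: MISS from <varnish ip>
[root@node2 app]# curl -I http://127.0.0.1:6081/ # second request: x-cache: HIT via <varnish ip>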
6. Install httpd on node5 and start the service
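A minimal sketch of this step (the page content is just an example so the backend can be identified during testing):
[root@node5 ~]# yum install -y httpd
[root@node5 ~]# echo "backend node5" > /var/www/html/index.html
[root@node5 ~]# systemctl start httpd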
7. Testing
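A possible end-to-end test from any client that can reach the VIP (the failover part relies on the chk_down script defined in the keepalived configuration: node1's priority drops from 100 to 80, below the BACKUP's 90, so the VIP moves to node3):
[root@client ~]# curl -I http://172.18.21.88/ # via the VIP: nginx -> varnish -> httpd; check the x-cache header
[root@node1 ~]# touch /etc/keepalived/down # chk_down now fails on node1
[root@node3 ~]# ip addr show dev ens33 | grep 172.18.21.88 # the VIP should now be on node3
[root@node1 ~]# rm -f /etc/keepalived/down # node1's priority is restored and it takes the VIP back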