标签:vrrp keepalived nginx
一:试验环境
主机名 | IP地址 | 集群角色 | 集群服务 | 虚拟IP地址 |
node1 | 10.43.2.11 | MASTER | Nginx | 10.43.2.99 |
node3 | 10.43.2.65 | BACKUP | Nginx | 10.43.2.99 |
二:Keepalived的安装配置
1.解决依赖关系:yum install -y openssl-devel openssl
2.wget http://www.keepalived.org/software/keepalived-1.2.11.tar.gz
tar xf keepalived-1.2.11.tar.gz
cd keepalived-1.2.11
./configure --prefix=/usr/local/keepalived
make && make install
cp /root/keepalived-1.2.11/keepalived/etc/init.d/keepalived.init /etc/init.d/keepalived
chmod +x /etc/init.d/keepalived
vim /etc/init.d/keepalived //修改下边一行指向你正确安装keepalived的路径
. /usr/local/keepalived/etc/sysconfig/keepalived (第十五行)
vim /usr/local/keepalived/etc/sysconfig/keepalived //修改为下边
KEEPALIVED_OPTIONS="-D -f /usr/local/keepalived/etc/keepalived/keepalived.conf"
vim /etc/profile 增加
export PATH=/usr/local/keepalived/sbin/:$PATH
cp /usr/local/keepalived/sbin/keepalived /usr/sbin/
chkconfig keepalived on
3.service keepalived start
三:修改配置文件
[root@node1 ~]# vim /usr/local/keepalived/etc/keepalived/keepalived.conf
! Configuration File for keepalived
! NOTE: keepalived.conf only supports '#' and '!' comments; the original
! listing used C-style '//' which the parser does not recognize.
global_defs {
    notification_email {
        root@localhost        # recipient of failover notification mail
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server localhost
    smtp_connect_timeout 30
    router_id node1           # unique per-node id; the local hostname is used here
}
vrrp_script Monitor_Nginx {
    script "/root/monitor_nginx.sh"
    interval 2                # run the health-check script every 2 seconds
    weight 2                  # priority adjustment used in MASTER/BACKUP election
}
vrrp_instance VI_1 {
    state MASTER              # this node starts as MASTER (set BACKUP on node3)
    interface eth0            # interface carrying VRRP heartbeat advertisements
    virtual_router_id 51
    priority 100              # higher priority wins the MASTER election
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111        # shared password authenticating VRRP peers
    }
    track_script {
        Monitor_Nginx         # invoke the vrrp_script defined above
    }
    virtual_ipaddress {
        10.43.2.99            # floating VIP — the client-facing service address
    }
}
###########################以下全部注释掉##############################
#virtual_server 192.168.200.100 443 {
# delay_loop 6
# lb_algo rr
# lb_kind NAT
# nat_mask 255.255.255.0
# persistence_timeout 50
# protocol TCP
#
# real_server 192.168.201.100 443 {
# weight 1
# SSL_GET {
# url {
# path /
# digest ff20ad2481f97b1754ef3e12ecd3a9cc
# }
# url {
# path /mrtg/
# digest 9b3a0c85a887a256d6939da88aabd8cd
# }
# connect_timeout 3
# nb_get_retry 3
# delay_before_retry 3
# }
# }
#}
#
#virtual_server 10.10.10.2 1358 {
# delay_loop 6
# lb_algo rr
# lb_kind NAT
# persistence_timeout 50
# protocol TCP
#
# sorry_server 192.168.200.200 1358
#
# real_server 192.168.200.2 1358 {
# weight 1
# HTTP_GET {
# url {
# path /testurl/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# url {
# path /testurl2/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# url {
# path /testurl3/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# connect_timeout 3
# nb_get_retry 3
# delay_before_retry 3
# }
# }
#
# real_server 192.168.200.3 1358 {
# weight 1
# HTTP_GET {
# url {
# path /testurl/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334c
# }
# url {
# path /testurl2/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334c
# }
# connect_timeout 3
# nb_get_retry 3
# delay_before_retry 3
# }
# }
#}
#virtual_server 10.10.10.3 1358 {
# delay_loop 3
# lb_algo rr
# lb_kind NAT
# nat_mask 255.255.255.0
# persistence_timeout 50
# protocol TCP
#
# real_server 192.168.200.4 1358 {
# weight 1
# HTTP_GET {
# url {
# path /testurl/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# url {
# path /testurl2/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# url {
# path /testurl3/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# connect_timeout 3
# nb_get_retry 3
# delay_before_retry 3
# }
# }
#
# real_server 192.168.200.5 1358 {
# weight 1
# HTTP_GET {
# url {
# path /testurl/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# url {
# path /testurl2/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# url {
# path /testurl3/test.jsp
# digest 640205b7b0fc66c1ea91c463fac6334d
# }
# connect_timeout 3
# nb_get_retry 3
# delay_before_retry 3
# }
# }
#}
monitor_nginx.sh脚本的内容:
[root@node1 ~]# cat monitor_nginx.sh
#!/bin/bash
# Keepalived health-check script for nginx.
# If no nginx process is running, attempt one restart; if nginx is still
# down 3 seconds later, stop keepalived so the VIP fails over to the
# BACKUP node (node3).
A=$(ps -C nginx --no-header | wc -l)
if [ "$A" -eq 0 ]; then
    /usr/sbin/nginx
    sleep 3
    if [ "$(ps -C nginx --no-header | wc -l)" -eq 0 ]; then
        /etc/init.d/keepalived stop
    fi
fi
四:Nginx的安装配置:
Node1上安装:
[root@node1 ~]# yum install -y nginx-1.4.7-1.el6.ngx.x86_64.rpm
修改Nginx的显示页面便于下面进行测试
[root@node1 ~]# vim /usr/share/nginx/html/index.html <h1>This is test page</h1>
Node3上安装:
[root@node3 ~]# yum install -y nginx-1.4.7-1.el6.ngx.x86_64.rpm
修改Nginx的显示页面便于下面进行测试
[root@node3 ~]# vim /usr/share/nginx/html/index.html <h1>This is test2 pages</h1>
五:测试验证
首先确保两台Nginx能够正常提供服务以及访问VIP时能正确定位到Node1上边的资源
[root@node1 ~]# curl 10.43.2.11 <h1>This is test page</h1> [root@node1 ~]# curl 10.43.2.65 <h1>This test2 pages </h1> [root@node1 ~]# curl 10.43.2.99 <h1>This is test page</h1>
验证Keepalived的实时切换:
由上边的脚本我们可以发现当我们停掉Nginx的时候,脚本会自动执行一次启动Nginx,如果失败会自动停掉自身的keepalived进程,此时Node3检测不到Node1的心跳信息就会自己提供Nginx的服务。当Node1恢复的时候由于在权重相同的情况下Node1的优先级高于Node3,VIP会自动跑到Node1上边来,Node1继续提供服务,Node3处于备机状态。检测结果如下:
[root@node1 ~]# curl 10.43.2.99 <h1>This is test page</h1> [root@node1 ~]# service keepalived stop Stopping keepalived: [ OK ] [root@node1 ~]# curl 10.43.2.99 <h1>This test2 pages </h1> [root@node1 ~]# service keepalived start Starting keepalived: [ OK ] [root@node1 ~]# curl 10.43.2.99 <h1>This is test page</h1>
六:总结与扩展
对于高可用的实现,实际上有很多种方式,Keepalived只是在配置与使用上比较简单,但是它也有一个明显的缺点就是当节点多于两个的时候做高可用,它就会显得比较乏力。
此时可以考虑使用如:corosync/heartbeat+pacemaker/crm的解决方案。
关于Keepalived的资源监控、角色选举策略以及自动切换的详细介绍将在以后的文章中说明。
本文出自 “linux运维” 博客,请务必保留此出处http://germanygu.blog.51cto.com/3574209/1653065
标签:vrrp keepalived nginx
原文地址:http://germanygu.blog.51cto.com/3574209/1653065