Highly available nginx load balancing with keepalived
Table of Contents
I. What is keepalived
II. How keepalived performs failover
III. Key functions of keepalived
IV. Deploying highly available nginx load balancing with keepalived
  1. Install keepalived
  2. Install nginx on both the master and the backup
  3. Configure keepalived
  4. Check which node holds the VIP
  5. Have keepalived monitor the nginx load balancer
  6. Add the monitoring script to the keepalived configuration
  7. Verification
I. What is keepalived
keepalived is a service used in cluster management to keep a cluster highly available. Its functionality is similar to heartbeat, and it is used to prevent single points of failure.
II. How keepalived performs failover
While keepalived is running, the master node keeps sending heartbeat messages to the backup node to announce that it is still alive. When the master fails, the heartbeats stop, and the backup invokes its takeover routine to take over the master's IP resources and services.
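These heartbeats are VRRP advertisements (IP protocol 112) that the current MASTER multicasts to 224.0.0.18, roughly once per advert_int second. As an optional way to watch them on the wire (a small sketch, assuming the VRRP interface is ens33 as in the lab below):
//capture the VRRP advertisements sent by the current MASTER
[root@master ~]# tcpdump -i ens33 -nn 'ip proto 112'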
III. Key functions of keepalived
keepalived has three important functions:
- managing the LVS load-balancing software
- health checking of LVS cluster nodes
- high availability (failover) for system network services
IV. Deploying highly available nginx load balancing with keepalived
Environment:
Hostname | IP address | OS |
---|---|---|
master | 192.168.183.135 | CentOS 8 |
slave | 192.168.183.136 | CentOS 8 |
The virtual IP (VIP) for this high-availability setup is 192.168.183.250.
1. Install keepalived
Set up keepalived on the master
//set the hostname
[root@localhost ~]# hostnamectl set-hostname master
[root@localhost ~]# bash
//disable the firewall and SELinux, and make sure SELinux ends up disabled
[root@master ~]# systemctl disable --now firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@master ~]# setenforce 0
[root@master ~]# sed -ri 's/^(SELINUX=).*/\1disabled/g' /etc/selinux/config
[root@master ~]# getenforce
Permissive
[root@master ~]# reboot
[root@master ~]# getenforce
Disabled
//configure the yum repository
[root@master ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@master ~]# sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
[root@master ~]# yum -y install epel-release vim wget gcc gcc-c++
//install keepalived
[root@master ~]# yum -y install keepalived
Install keepalived on the backup server in the same way
//set the hostname
[root@localhost ~]# hostnamectl set-hostname slave
[root@localhost ~]# bash
//disable the firewall and SELinux, and make sure SELinux ends up disabled
[root@slave ~]# systemctl disable --now firewalld
Removed /etc/systemd/system/multi-user.target.wants/firewalld.service.
Removed /etc/systemd/system/dbus-org.fedoraproject.FirewallD1.service.
[root@slave ~]# setenforce 0
[root@slave ~]# sed -ri 's/^(SELINUX=).*/\1disabled/g' /etc/selinux/config
[root@slave ~]# reboot
[root@slave ~]# getenforce
Disabled
//configure the yum repository
[root@slave ~]# curl -o /etc/yum.repos.d/CentOS-Base.repo https://mirrors.aliyun.com/repo/Centos-vault-8.5.2111.repo
[root@slave ~]# sed -i -e '/mirrors.cloud.aliyuncs.com/d' -e '/mirrors.aliyuncs.com/d' /etc/yum.repos.d/CentOS-Base.repo
[root@slave ~]# yum -y install epel-release vim wget gcc gcc-c++
//install keepalived
[root@slave ~]# yum -y install keepalived
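As an optional sanity check (not part of the original walkthrough), confirm the package landed on both nodes:
//verify the installed keepalived package and print its version
[root@master ~]# rpm -q keepalived
[root@master ~]# keepalived --version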
2. Install nginx on both the master and the backup
Install nginx on the master
[root@master ~]# yum -y install nginx
[root@master ~]# cd /usr/share/nginx/html/
[root@master html]# ls
404.html 50x.html index.html nginx-logo.png poweredby.png
[root@master html]# echo 'master' > index.html
[root@master html]# systemctl enable --now nginx
Created symlink /etc/systemd/system/multi-user.target.wants/nginx.service → /usr/lib/systemd/system/nginx.service.
[root@master html]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 [::]:80 [::]:*
LISTEN 0 128 [::]:22 [::]:*
[root@master html]# curl 192.168.183.135
master
Install nginx on the slave
[root@slave ~]# yum -y install nginx
[root@slave ~]# cd /usr/share/nginx/html/
[root@slave html]# ls
404.html 50x.html index.html nginx-logo.png poweredby.png
[root@slave html]# echo 'slave' > index.html
[root@slave html]# systemctl start nginx
//note: do not enable the nginx service to start at boot on the backup server
Try accessing the site in a browser to make sure the nginx service on the master is reachable.
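Besides the browser, a quick command-line check works as well (a small sketch; expected results are noted in the comments):
//on the slave, nginx should be running ("active") but not enabled at boot ("disabled")
[root@slave ~]# systemctl is-active nginx
[root@slave ~]# systemctl is-enabled nginx
//from any host that can reach both servers, each should answer with its own page
[root@master ~]# curl 192.168.183.136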
3. Configure keepalived
Configure keepalived on the master
[root@master ~]# cd /etc/keepalived/
[root@master keepalived]# mv keepalived.conf{,-bak}
[root@master keepalived]# ls
keepalived.conf-bak
[root@master keepalived]# vim keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb01
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.183.250
    }
}
virtual_server 192.168.183.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.183.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.183.136 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@master ~]# systemctl enable --now keepalived
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
Configure keepalived on the backup
[root@slave ~]# cd /etc/keepalived/
[root@slave keepalived]# mv keepalived.conf{,-bak}
[root@slave keepalived]# ls
keepalived.conf-bak
[root@slave keepalived]# vim keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb02              //change the router id
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90                 //lower the priority
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.183.250
    }
}
virtual_server 192.168.183.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.183.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.183.136 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@slave ~]# systemctl enable --now keepalived
Created symlink /etc/systemd/system/multi-user.target.wants/keepalived.service → /usr/lib/systemd/system/keepalived.service.
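Before looking at the VIP itself, each node's VRRP role can also be read from the journal (an optional check; the exact wording of the log lines depends on the keepalived version):
//the master should log something like "Entering MASTER STATE", the slave "Entering BACKUP STATE"
[root@master ~]# journalctl -u keepalived --no-pager | tail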
4. Check which node holds the VIP
Check on the master
[root@master ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:7f:37:b0 brd ff:ff:ff:ff:ff:ff
inet 192.168.183.135/24 brd 192.168.183.255 scope global dynamic noprefixroute ens33
valid_lft 1737sec preferred_lft 1737sec
inet 192.168.183.250/32 scope global ens33    //note the VIP here
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe7f:37b0/64 scope link noprefixroute
valid_lft forever preferred_lft forever
Check on the slave
[root@slave ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:07:de:9b brd ff:ff:ff:ff:ff:ff
inet 192.168.183.136/24 brd 192.168.183.255 scope global dynamic noprefixroute ens33
valid_lft 1779sec preferred_lft 1779sec
inet6 fe80::20c:29ff:fe07:de9b/64 scope link noprefixroute
valid_lft forever preferred_lft forever
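A one-liner makes it easy to re-check which node currently owns the VIP (a small convenience, not from the original run):
//prints the VIP line only on the node that currently holds it
[root@master ~]# ip addr show ens33 | grep 192.168.183.250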
5. Have keepalived monitor the nginx load balancer
keepalived monitors the state of the nginx load balancer through a script.
Write the scripts on the master
[root@master ~]# mkdir /scripts
[root@master ~]# cd /scripts/
[root@master scripts]# vim check_n.sh
#!/bin/bash
nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
if [ $nginx_status -lt 1 ]; then
    systemctl stop keepalived
fi
[root@master scripts]# chmod +x check_n.sh
[root@master scripts]# ll
total 4
-rwxr-xr-x 1 root root 142 Oct 8 20:00 check_n.sh
[root@master scripts]# vim notify.sh
#!/bin/bash
VIP=$2
case "$1" in
master)
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -lt 1 ]; then
        systemctl start nginx
    fi
    ;;
backup)
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -gt 0 ]; then
        systemctl stop nginx
    fi
    ;;
*)
    echo "Usage: $0 master|backup VIP"
    ;;
esac
[root@master scripts]# chmod +x notify.sh
[root@master scripts]# ll
total 8
-rwxr-xr-x 1 root root 142 Oct 8 20:00 check_n.sh
-rwxr-xr-x 1 root root 432 Oct 8 20:06 notify.sh
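The check above only proves that an nginx process exists, not that it is actually answering requests. A slightly stricter variant could probe the HTTP port instead (a hypothetical alternative, /scripts/check_n_http.sh, not part of the original setup):
#!/bin/bash
# stop keepalived (and thus give up the VIP) if local nginx does not answer HTTP within 3 seconds
if ! curl -s -o /dev/null --max-time 3 http://127.0.0.1; then
    systemctl stop keepalived
fi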
Write the script on the slave
[root@slave ~]# mkdir /scripts
[root@slave ~]# cd /scripts/
[root@slave scripts]# vim notify.sh
#!/bin/bash
VIP=$2
case "$1" in
master)
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -lt 1 ]; then
        systemctl start nginx
    fi
    ;;
backup)
    nginx_status=$(ps -ef|grep -Ev "grep|$0"|grep '\bnginx\b'|wc -l)
    if [ $nginx_status -gt 0 ]; then
        systemctl stop nginx
    fi
    ;;
*)
    echo "Usage: $0 master|backup VIP"
    ;;
esac
[root@slave scripts]# chmod +x notify.sh
[root@slave scripts]# ll
total 8
-rwxr-xr-x 1 root root 142 Oct 8 20:10 check_n.sh
-rwxr-xr-x 1 root root 432 Oct 8 20:08 notify.sh
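Before wiring the script into keepalived, it can be exercised by hand (an optional check):
//simulate a promotion and then a demotion; nginx should be active after the first call and stopped after the second
[root@slave ~]# bash /scripts/notify.sh master 192.168.183.250
[root@slave ~]# systemctl is-active nginx
[root@slave ~]# bash /scripts/notify.sh backup 192.168.183.250
[root@slave ~]# systemctl is-active nginx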
6. Add the monitoring script to the keepalived configuration
Configure keepalived on the master
[root@master ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb01
}
vrrp_script nginx_check {
    script "/scripts/check_n.sh"
    interval 1
    weight -20
}
vrrp_instance VI_1 {
    state MASTER
    interface ens33
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.183.250
    }
    track_script {
        nginx_check
    }
    notify_master "/scripts/notify.sh master 192.168.183.250"
    notify_backup "/scripts/notify.sh backup 192.168.183.250"
}
virtual_server 192.168.183.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.183.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.183.136 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@master ~]# systemctl restart keepalived
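On keepalived 2.x (the version CentOS 8 ships), the configuration file can also be syntax-checked before restarting (an optional step, assuming a 2.x build):
//checks the file for obvious errors and reports them instead of silently failing at runtime
[root@master ~]# keepalived -t -f /etc/keepalived/keepalived.conf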
Configure keepalived on the backup
The slave does not need to check whether nginx is healthy; it only starts nginx when it is promoted to MASTER and stops it when it is demoted back to BACKUP.
[root@slave ~]# vim /etc/keepalived/keepalived.conf
! Configuration File for keepalived
global_defs {
    router_id lb02
}
vrrp_instance VI_1 {
    state BACKUP
    interface ens33
    virtual_router_id 51
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 123456
    }
    virtual_ipaddress {
        192.168.183.250
    }
    notify_master "/scripts/notify.sh master 192.168.183.250"
    notify_backup "/scripts/notify.sh backup 192.168.183.250"
}
virtual_server 192.168.183.250 80 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP
    real_server 192.168.183.135 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.183.136 80 {
        weight 1
        TCP_CHECK {
            connect_port 80
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@slave ~]# systemctl restart keepalived
7. Verification
When nginx is stopped on the master, the check script stops keepalived as well, and the VIP moves over to the slave. nginx is not started on the slave by default, but once the master goes down, the slave's notify script brings nginx up, so both nginx and keepalived end up running on the slave.
Stop the nginx service on the master
[root@master ~]# systemctl stop nginx
[root@master ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor >
Active: inactive (dead) since Sat 2022-10-08 20:41:31 CST; 3min 49s ago
Process: 251802 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exit>
Main PID: 251804 (code=exited, status=0/SUCCESS)
On the slave, check whether the VIP is present and whether nginx and keepalived are running
[root@slave ~]# ip a
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 65536 qdisc noqueue state UNKNOWN group default qlen 1000
link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00
inet 127.0.0.1/8 scope host lo
valid_lft forever preferred_lft forever
inet6 ::1/128 scope host
valid_lft forever preferred_lft forever
2: ens33: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc fq_codel state UP group default qlen 1000
link/ether 00:0c:29:07:de:9b brd ff:ff:ff:ff:ff:ff
inet 192.168.183.136/24 brd 192.168.183.255 scope global dynamic noprefixroute ens33
valid_lft 972sec preferred_lft 972sec
inet 192.168.183.250/32 scope global ens33
valid_lft forever preferred_lft forever
inet6 fe80::20c:29ff:fe07:de9b/64 scope link noprefixroute
valid_lft forever preferred_lft forever
[root@slave ~]# ss -antl
State Recv-Q Send-Q Local Address:Port Peer Address:Port Process
LISTEN 0 128 0.0.0.0:22 0.0.0.0:*
LISTEN 0 128 0.0.0.0:80 0.0.0.0:*
LISTEN 0 128 [::]:22 [::]:*
LISTEN 0 128 [::]:80 [::]:*
[root@slave ~]# systemctl status nginx
● nginx.service - The nginx HTTP and reverse proxy server
Loaded: loaded (/usr/lib/systemd/system/nginx.service; disabled; vendor pres>
Active: active (running) since Sat 2022-10-08 20:41:28 CST; 4min 33s ago
Process: 235224 ExecStart=/usr/sbin/nginx (code=exited, status=0/SUCCESS)
Process: 235221 ExecStartPre=/usr/sbin/nginx -t (code=exited, status=0/SUCCES>
Process: 235218 ExecStartPre=/usr/bin/rm -f /run/nginx.pid (code=exited, stat>
Main PID: 235225 (nginx)
Tasks: 5 (limit: 12199)
Memory: 7.8M
[root@slave ~]# systemctl status keepalived
● keepalived.service - LVS and VRRP High Availability Monitor
Loaded: loaded (/usr/lib/systemd/system/keepalived.service; enabled; vendor >
Active: active (running) since Sat 2022-10-08 20:41:17 CST; 5min ago
Process: 234743 ExecStart=/usr/sbin/keepalived $KEEPALIVED_OPTIONS (code=exit>
Main PID: 234744 (keepalived)
Tasks: 3 (limit: 12199)
Memory: 2.2M
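To return the cluster to its normal state, bring nginx and then keepalived back up on the master. Because the master has the higher priority (100 vs 90), it should reclaim the VIP, and the slave's notify script should stop nginx on the slave again (a sketch of the recovery, not shown in the original run):
//start nginx first so the check script does not immediately stop keepalived again
[root@master ~]# systemctl start nginx
[root@master ~]# systemctl start keepalived
//the VIP should move back to the master within a few advertisement intervals
[root@master ~]# ip addr show ens33 | grep 192.168.183.250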