Lab environment topology diagram:
Note: the 192.168.91.0/24 segment simulates the client-facing (front-end) network, and the 192.168.23.0/24 segment simulates the back-end real-server network.
1. Disable the firewall and SELinux on both nodes.
[root@node1 keepalived]# systemctl stop firewalld      # stop the firewall
[root@node1 keepalived]# systemctl disable firewalld   # keep the firewall from starting at boot
# If you do not want to disable the firewall, you must allow traffic to the multicast address 224.0.0.18. The two nodes exchange VRRP messages (heartbeats, priority, and so on) through this multicast address; an example rule is shown after the check below.
[root@node1 keepalived]# systemctl list-unit-files | grep firewalld   # verify the service will not start at boot; "disabled" means it is off
firewalld.service disabled
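If you would rather keep firewalld running, a rule along the following lines should let the VRRP traffic through (a sketch only; it uses the rich-rule syntax of the firewalld shipped with CentOS 7 and applies to the default zone, so adjust it to your setup):
[root@node1 keepalived]# firewall-cmd --permanent --add-rich-rule='rule protocol value="vrrp" accept'   # allow IP protocol 112 (VRRP)
[root@node1 keepalived]# firewall-cmd --reload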
# Disable SELinux. With SELinux enabled the keepalived service may fail to start, and I have not found a workaround for that yet.
[root@node1 keepalived]# setenforce 0                  # disable temporarily (until reboot)
[root@node1 keepalived]# vim /etc/selinux/config       # disable permanently by setting SELINUX=disabled; takes effect only after a reboot
SELINUX=disabled
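A quick way to check the current SELinux mode:
[root@node1 keepalived]# getenforce   # prints Permissive after setenforce 0, and Disabled after the config change plus a reboot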
2. The clocks on the two nodes must be kept in sync.
CentOS 7 uses chrony for time synchronization. It fills the same role as ntp and is said to be more capable. Installing it is normally all that is needed; no extra configuration is required as long as the nodes can reach the internet.
[root@node1 keepalived]# yum -y install chrony   # install on both nodes; if they cannot reach the internet, add the domain name or IP address of an internal NTP server to /etc/chrony.conf
[root@node1 keepalived]# vim /etc/chrony.conf
# Without internet access, comment out the following four lines (strictly speaking you can leave them) and list your internal NTP server first:
server ntp.centos7.cn              # internal NTP server; the name ntp.centos7.cn must be resolvable
#server 0.centos.pool.ntp.org iburst
#server 1.centos.pool.ntp.org iburst
#server 2.centos.pool.ntp.org iburst
#server 3.centos.pool.ntp.org iburst
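After editing the configuration, make sure the chronyd service is running and enabled, and check that a time source has been selected:
[root@node1 keepalived]# systemctl start chronyd
[root@node1 keepalived]# systemctl enable chronyd
[root@node1 keepalived]# chronyc sources   # the currently selected server is marked with ^*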
[root@node1 keepalived]# date;ssh node2 'date'
Thu Mar 3 17:55:31 CST 2016
Thu Mar 3 17:55:32 CST 2016
3. Set up key-based SSH access between the two nodes.
# Generate a public/private key pair.
[root@node1 keepalived]# ssh-keygen   # press Enter at every prompt; the key pair is stored under /root/.ssh/
# Copy node1's public key to /root/.ssh/ on node2.
[root@node1 .ssh]# scp id_rsa.pub root@node2:/root/.ssh/id_rsa.pub
# On node2, append node1's public key to the authorized_keys file. If /root/.ssh/authorized_keys does not exist yet it is created; otherwise the key is appended to it.
[root@node2 .ssh]# cat id_rsa.pub >>authorized_keys
# On node2, remove the copied public key file.
[root@node2 .ssh]# rm -f id_rsa.pub
Repeat the process in the other direction: generate a key pair on node2, copy its public key to /root/.ssh/ on node1, append it to authorized_keys there, and then delete the copied public key file.
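Alternatively, ssh-copy-id performs the copy-and-append steps in one go; run it on each node, pointing at the other:
[root@node1 ~]# ssh-copy-id root@node2
[root@node2 ~]# ssh-copy-id root@node1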
4. Configure /etc/hosts on both nodes so that they can reach each other by hostname. Add the following entries to /etc/hosts:
192.168.91.129 centos7.cn node1
192.168.91.130 centos7.cn node2
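A quick check that the names resolve and the nodes can reach each other:
[root@node1 ~]# ping -c 2 node2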
5. Enable IP forwarding on both nodes.
vim /etc/sysctl.conf
net.ipv4.ip_forward=1
[root@node1 ~]# sysctl -p
net.ipv4.ip_forward = 1
Confirm that IP forwarding is enabled; a value of 1 means it is on.
[root@node1 keepalived]# cat /proc/sys/net/ipv4/ip_forward
1
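Since key-based SSH is already in place, the same setting can be pushed to node2 in one step (assuming the line is not already present in node2's /etc/sysctl.conf):
[root@node1 ~]# ssh node2 "echo 'net.ipv4.ip_forward=1' >> /etc/sysctl.conf; sysctl -p"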
6. Install keepalived and ipvsadm on both nodes (ipvsadm is not strictly required, but it makes it easy to inspect the LVS state).
yum -y install keepalived ipvsadm;ssh node2 'yum -y install keepalived ipvsadm'
The keepalived.conf configuration file on node1:
global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_MASTER
}

vrrp_instance VI_1 {
    state MASTER
    interface eno16777736
    virtual_router_id 51
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass abbac1e595fe
    }
    virtual_ipaddress {
        192.168.91.15/32 dev eno16777736 label eno16777736:0
    }
    virtual_routes {
        192.168.91.15/32 dev eno16777736:0
    }
}

vrrp_instance VI_2 {
    state BACKUP
    interface eno16777736
    virtual_router_id 52
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1e67cca200cf
    }
    virtual_ipaddress {
        192.168.91.16/32 dev eno16777736 label eno16777736:1
    }
    virtual_routes {
        192.168.91.16/32 dev eno16777736:1
    }
}

vrrp_instance VI_3 {
    state MASTER
    interface eno33554984
    virtual_router_id 53
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass e027a03bcd81
    }
    virtual_ipaddress {
        192.168.23.15/32 dev eno33554984 label eno33554984:0
    }
    virtual_routes {
        192.168.23.15/32 dev eno33554984:0
    }
}

vrrp_instance VI_4 {
    state BACKUP
    interface eno33554984
    virtual_router_id 54
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass f03c1c91c7fc
    }
    virtual_ipaddress {
        192.168.23.14/32 dev eno33554984 label eno33554984:1
    }
    virtual_routes {
        192.168.23.14/32 dev eno33554984:1
    }
}

virtual_server 192.168.91.15 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind NAT
    protocol TCP
    real_server 192.168.23.16 80 {
        weight 6
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.23.18 80 {
        weight 3
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

virtual_server 192.168.91.16 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind NAT
    protocol TCP
    real_server 192.168.23.19 80 {
        weight 6
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.23.17 80 {
        weight 3
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@node1 keepalived]# systemctl start keepalived    # start the keepalived service
[root@node1 keepalived]# systemctl enable keepalived   # start keepalived automatically at boot
Created symlink from /etc/systemd/system/multi-user.target.wants/keepalived.service to /usr/lib/systemd/system/keepalived.service.
[root@node1 keepalived]# systemctl list-unit-files | grep keepalived   # verify that keepalived will start at boot
keepalived.service enabled
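To confirm that each VRRP instance has entered the expected state, check the keepalived messages in the journal (the unit name below is the one shipped with the CentOS 7 package):
[root@node1 keepalived]# journalctl -u keepalived | grep -i state   # look for "Entering MASTER STATE" / "Entering BACKUP STATE" lines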
IPVS state on node1:
[root@node1 ~]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.91.15:80 wrr
  -> 192.168.23.16:80             Masq    6      0          0
  -> 192.168.23.18:80             Masq    3      0          0
TCP  192.168.91.16:80 wrr
  -> 192.168.23.19:80             Masq    6      0          0
  -> 192.168.23.17:80             Masq    3      0          0
IP address information on node1:
[root@node1 keepalived]# ifconfig
eno16777736: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.91.129  netmask 255.255.255.0  broadcast 192.168.91.255
        inet6 fe80::20c:29ff:fec1:fe33  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:c1:fe:33  txqueuelen 1000  (Ethernet)
        RX packets 31637  bytes 5942061 (5.6 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 30175  bytes 2627032 (2.5 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eno16777736:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.91.15  netmask 255.255.255.255  broadcast 0.0.0.0
        ether 00:0c:29:c1:fe:33  txqueuelen 1000  (Ethernet)

eno33554984: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.23.11  netmask 255.255.255.0  broadcast 192.168.23.255
        inet6 fe80::20c:29ff:fec1:fe3d  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:c1:fe:3d  txqueuelen 1000  (Ethernet)
        RX packets 63347  bytes 8511811 (8.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 81785  bytes 6504879 (6.2 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eno33554984:0: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.23.15  netmask 255.255.255.255  broadcast 0.0.0.0
        ether 00:0c:29:c1:fe:3d  txqueuelen 1000  (Ethernet)

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 146  bytes 10759 (10.5 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 146  bytes 10759 (10.5 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
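The configuration on node2 differs from node1 only in the router_id (LVS_BACKUP instead of LVS_MASTER) and in the state/priority of the four VRRP instances: the MASTER/BACKUP roles are swapped, so each node is master for one external VIP and one internal VIP. A convenient way to create it (optional, just a suggestion) is to copy node1's file over and then edit those fields:
[root@node1 keepalived]# scp /etc/keepalived/keepalived.conf node2:/etc/keepalived/keepalived.conf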
The keepalived.conf configuration file on node2:
global_defs {
    notification_email {
        root@localhost
    }
    notification_email_from Alexandre.Cassen@firewall.loc
    smtp_server 127.0.0.1
    smtp_connect_timeout 30
    router_id LVS_BACKUP
}

vrrp_instance VI_1 {
    state BACKUP
    interface eno16777736
    virtual_router_id 51
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass abbac1e595fe
    }
    virtual_ipaddress {
        192.168.91.15/32 dev eno16777736 label eno16777736:0
    }
    virtual_routes {
        192.168.91.15/32 dev eno16777736:0
    }
}

vrrp_instance VI_2 {
    state MASTER
    interface eno16777736
    virtual_router_id 52
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1e67cca200cf
    }
    virtual_ipaddress {
        192.168.91.16/32 dev eno16777736 label eno16777736:1
    }
    virtual_routes {
        192.168.91.16/32 dev eno16777736:1
    }
}

vrrp_instance VI_3 {
    state BACKUP
    interface eno33554984
    virtual_router_id 53
    priority 99
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass e027a03bcd81
    }
    virtual_ipaddress {
        192.168.23.15/32 dev eno33554984 label eno33554984:0
    }
    virtual_routes {
        192.168.23.15/32 dev eno33554984:0
    }
}

vrrp_instance VI_4 {
    state MASTER
    interface eno33554984
    virtual_router_id 54
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass f03c1c91c7fc
    }
    virtual_ipaddress {
        192.168.23.14/32 dev eno33554984 label eno33554984:1
    }
    virtual_routes {
        192.168.23.14/32 dev eno33554984:1
    }
}

virtual_server 192.168.91.15 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind NAT
    protocol TCP
    real_server 192.168.23.16 80 {
        weight 6
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.23.18 80 {
        weight 3
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}

virtual_server 192.168.91.16 80 {
    delay_loop 6
    lb_algo wrr
    lb_kind NAT
    protocol TCP
    real_server 192.168.23.19 80 {
        weight 6
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
    real_server 192.168.23.17 80 {
        weight 3
        HTTP_GET {
            url {
                path /
                status_code 200
            }
            connect_timeout 3
            nb_get_retry 3
            delay_before_retry 3
        }
    }
}
[root@node2 keepalived]# ifconfig
eno16777736: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.91.130  netmask 255.255.255.0  broadcast 192.168.91.255
        inet6 fe80::20c:29ff:fe2b:9929  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:2b:99:29  txqueuelen 1000  (Ethernet)
        RX packets 34791  bytes 10658056 (10.1 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 28618  bytes 2324860 (2.2 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eno16777736:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.91.16  netmask 255.255.255.255  broadcast 0.0.0.0
        ether 00:0c:29:2b:99:29  txqueuelen 1000  (Ethernet)

eno33554984: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.23.12  netmask 255.255.255.0  broadcast 192.168.23.255
        inet6 fe80::20c:29ff:fe2b:9933  prefixlen 64  scopeid 0x20<link>
        ether 00:0c:29:2b:99:33  txqueuelen 1000  (Ethernet)
        RX packets 66429  bytes 8629069 (8.2 MiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 86941  bytes 6784854 (6.4 MiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0

eno33554984:1: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
        inet 192.168.23.14  netmask 255.255.255.255  broadcast 0.0.0.0
        ether 00:0c:29:2b:99:33  txqueuelen 1000  (Ethernet)

lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
        inet 127.0.0.1  netmask 255.0.0.0
        inet6 ::1  prefixlen 128  scopeid 0x10<host>
        loop  txqueuelen 0  (Local Loopback)
        RX packets 150  bytes 10963 (10.7 KiB)
        RX errors 0  dropped 0  overruns 0  frame 0
        TX packets 150  bytes 10963 (10.7 KiB)
        TX errors 0  dropped 0  overruns 0  carrier 0  collisions 0
[root@node2 keepalived]# cat /proc/sys/net/ipv4/ip_forward
1
[root@node2 keepalived]# ipvsadm -Ln
IP Virtual Server version 1.2.1 (size=4096)
Prot LocalAddress:Port Scheduler Flags
  -> RemoteAddress:Port           Forward Weight ActiveConn InActConn
TCP  192.168.91.15:80 wrr
  -> 192.168.23.16:80             Masq    6      0          0
  -> 192.168.23.18:80             Masq    3      0          0
TCP  192.168.91.16:80 wrr
  -> 192.168.23.19:80             Masq    6      0          0
  -> 192.168.23.17:80             Masq    3      0          0
Give the Real Servers IP addresses in the 192.168.23.0/24 segment. Servers whose last octet is even (starting at 16) use 192.168.23.15 as their default gateway; servers whose last octet is odd use 192.168.23.14.
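As a concrete illustration (a sketch only: the interface name eth0 is an assumption, not taken from the original setup), a real server at 192.168.23.16 would point its default gateway at the internal VIP 192.168.23.15, for example in /etc/sysconfig/network-scripts/ifcfg-eth0:
DEVICE=eth0
BOOTPROTO=static
ONBOOT=yes
IPADDR=192.168.23.16
PREFIX=24
GATEWAY=192.168.23.15
A server with an odd last octet (for example 192.168.23.17) would use GATEWAY=192.168.23.14 instead.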
Test: a client accessing http://192.168.91.15 reaches the group-1 servers, and a client accessing http://192.168.91.16 reaches the group-2 servers.
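For example, from a client on the 192.168.91.0/24 network (curl is used here purely for illustration):
[client]$ curl http://192.168.91.15/    # served by group 1 (192.168.23.16 / 192.168.23.18)
[client]$ curl http://192.168.91.16/    # served by group 2 (192.168.23.19 / 192.168.23.17)
Repeating the requests should also show the wrr weighting between the two real servers behind each VIP.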
Is there a better way to set this up so that the internal Real Servers do not have to be split into groups and can all share the same default gateway?
Problem: a client could only reach the back-end servers through one external VIP; the other external VIP did not work. The reason is that each back-end server can have only one default gateway, and that gateway address has to be an internal VIP held on a sub-interface of one of the directors. The compromise used here is to split the back-end servers into two groups that share the same subnet and netmask but use different IP addresses and different default gateways, together with two external VIPs and two internal VIPs. I do not know whether there is a better solution.
Original article by jslijb. If you repost it, please credit the source: http://www.www58058.com/12698