
A Pacemaker + HAProxy high-availability load-balancing architecture on Linux


The corosync + pacemaker + crmsh high-availability cluster

corosync provides the cluster's messaging layer: it carries heartbeat and cluster-transaction information, with the nodes monitoring each other's heartbeats over multicast.

pacemaker works at the resource-allocation layer and acts as the cluster resource manager; resources are configured through crmsh, its command-line configuration interface.

One handles heartbeat detection, the other resource failover; combined, they automate the management of a high-availability architecture.

Heartbeat detection checks whether a server is still providing service; as soon as it stops responding normally, it is considered down.

Once a server is detected as down, its service resources are failed over to another node.

Corosync is the open-source software running at the heartbeat layer.

Pacemaker is the open-source software running at the resource-failover layer.
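Each layer has its own status tool, which makes the split easy to see on a running cluster. A minimal check, assuming the corosync 1.x and pacemaker packages used later in this article:

# Messaging layer (corosync): show ring status for this node
corosync-cfgtool -s
# Resource layer (pacemaker): one-shot snapshot of nodes and resources
crm_mon -1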

HAProxy + Apache: load balancing, dynamic/static separation, read/write separation

HAProxy provides high-availability load balancing and proxying for TCP- and HTTP-based applications.

It is lightweight and excels at handling high concurrency.

Unlike nginx, however, HAProxy is not an HTTP server. Many products that offer reverse-proxy load balancing, such as nginx and Apache's proxy modules, are web servers in their own right: they can serve static content (html, jpg, gif, ...) or process dynamic content (PHP, CGI, ...) themselves. HAProxy, by contrast, is purely and specifically a load-balancing application proxy; on its own it cannot serve HTTP content.

It has built-in health checking, and since version 1.3 it also supports simple rule matching (ACLs).
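A minimal sketch of those two features (all names and addresses here are placeholders, not the lab configuration used below): the check keyword enables the built-in health check, and the acl/use_backend pair is the simple rule matching available since 1.3.

frontend web
    bind *:80
    mode http
    acl is_img path_beg /img
    use_backend imgsrv if is_img
    default_backend websrv

backend websrv
    mode http
    balance roundrobin
    server web1 192.168.0.11:80 check inter 1000
    server web2 192.168.0.12:80 check inter 1000

backend imgsrv
    mode http
    server img1 192.168.0.13:80 check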

Test environment: server1: 172.25.26.2

server2:172.25.26.3

server3:172.25.26.4

server4:172.25.26.5

1. The "RHCS suite + Nginx high-availability load balancing" experiment was run on these machines earlier, so a clean environment is needed first. Disable and stop the leftover services as follows:

Server1:

[root@server1 ~]# chkconfig cman off
[root@server1 ~]# chkconfig rgmanager off
[root@server1 ~]# chkconfig modclusterd off
[root@server1 ~]# chkconfig iscsi off
[root@server1 ~]# chkconfig --list clvmd
[root@server1 ~]# chkconfig clvmd off
[root@server1 ~]# chkconfig ricci off
[root@server1 ~]# chkconfig luci off
[root@server1 ~]# /etc/init.d/ricci stop
[root@server1 ~]# /etc/init.d/luci stop
[root@server1 ~]# ps -ax

Server2:

[root@server2 ~]# chkconfig cman off
[root@server2 ~]# chkconfig rgmanager off
[root@server2 ~]# chkconfig modclusterd off
[root@server2 ~]# chkconfig iscsi off
[root@server2 ~]# chkconfig --list clvmd
[root@server2 ~]# chkconfig clvmd off
[root@server2 ~]# chkconfig ricci off
[root@server2 ~]# chkconfig luci off
[root@server2 ~]# /etc/init.d/ricci stop
[root@server2 ~]# /etc/init.d/luci stop
[root@server2 ~]# ps -ax

2. Procedure:

(1) HAProxy configuration:

Server1:

[root@server1 ~]# ls
haproxy-1.6.11.tar.gz
[root@server1 ~]# yum install -y rpm-build
[root@server1 ~]# rpmbuild -tb haproxy-1.6.11.tar.gz
[root@server1 ~]# cd /root/rpmbuild/RPMS
[root@server1 RPMS]# ls
x86_64
[root@server1 RPMS]# cd x86_64/
[root@server1 x86_64]# ls
haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# rpm -qpl haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# cd
[root@server1 ~]# tar zxf haproxy-1.6.11.tar.gz
[root@server1 ~]# cd haproxy-1.6.11
[root@server1 haproxy-1.6.11]# ls
[root@server1 haproxy-1.6.11]# find -name *.spec
[root@server1 haproxy-1.6.11]# cd examples/
[root@server1 examples]# ls
[root@server1 examples]# cp content-sw-sample.cfg /etc/haproxy/haproxy.cfg
[root@server1 examples]# cd /etc/haproxy/
[root@server1 haproxy]# ls
[root@server1 haproxy]# vim haproxy.cfg
[root@server1 haproxy]# vim /etc/init.d/haproxy

Server1:

The haproxy user and group are created with uid/gid 200 to match the uid 200 / gid 200 settings in the sample configuration:

[root@server1 ~]# cd /etc/haproxy/
[root@server1 haproxy]# vim haproxy.cfg
[root@server1 haproxy]# grep 200 /etc/passwd
[root@server1 haproxy]# groupadd -g 200 haproxy
[root@server1 haproxy]# useradd -u 200 -g 200 -M haproxy
[root@server1 haproxy]# ip addr
[root@server1 haproxy]# id haproxy
[root@server1 haproxy]# vim /etc/security/limits.conf
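The limits.conf change itself is not reproduced in the original; a plausible entry (an assumption, sized to the maxconn 10000 used in haproxy.cfg below) raises the open-file limit for the haproxy user:

# /etc/security/limits.conf -- assumed entry; roughly one fd per connection plus overhead
haproxy    -    nofile    10000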

[root@server1 haproxy]# vim haproxy.cfg
global
    maxconn 10000
    stats socket /var/run/haproxy.stat mode 600 level admin
    log 127.0.0.1 local0
    uid 200
    gid 200
    chroot /var/empty
    daemon

defaults
    mode http
    log global
    option httplog
    option dontlognull
    monitor-uri /monitoruri
    maxconn 8000
    timeout client 30s
    stats uri /admin/stats
    option prefer-last-server
    retries 2
    option redispatch
    timeout connect 5s
    timeout server 5s

# The public 'www' address in the DMZ
frontend public
    bind *:80 name clear
    #bind 192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
    #use_backend static if { hdr_beg(host) -i img }
    #use_backend static if { path_beg /img /css }
    default_backend static

# The static backend backend for 'Host: img', /img and /css.
backend static
    balance roundrobin
    server statsrv1 172.25.26.3:80 check inter 1000
    server statsrv2 172.25.26.4:80 check inter 1000

[root@server1 haproxy]# /etc/init.d/haproxy start

Server2:

[root@server2 ~]# /etc/init.d/httpd start

Server3:

[root@server3 ~]# /etc/init.d/httpd start
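Round-robin scheduling can now be checked from a client (here the physical host foundation26 is assumed as the client); if each backend's index.html identifies its server, repeated requests should alternate between server2 and server3:

[root@foundation26 ~]# for i in $(seq 4); do curl -s http://172.25.26.2/; done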

Server1:

HAProxy logs to the local0 facility over UDP syslog, so rsyslog must accept UDP input and route local0 to its own file:

[root@server1 haproxy]# cd /var/log
[root@server1 log]# ls
[root@server1 log]# cat messages
[root@server1 log]# cd /etc/haproxy/
[root@server1 haproxy]# vim /etc/rsyslog.conf
# Provides UDP syslog reception
$ModLoad imudp
$UDPServerRun 514
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none;local0.none    /var/log/messages
# Save boot messages also to boot.log
local7.*    /var/log/boot.log
local0.*    /var/log/haproxy.log
[root@server1 haproxy]# /etc/init.d/rsyslog restart
[root@server1 haproxy]# cd /var/log
[root@server1 log]# ls
[root@server1 log]# cat haproxy.log

Server1:

To separate dynamic from static content, requests ending in .php are routed to a second backend:

[root@server1 ~]# cd /etc/haproxy/
[root@server1 haproxy]# ls
[root@server1 haproxy]# vim haproxy.cfg
frontend public
    bind *:80 name clear
    #bind 192.168.1.10:443 ssl crt /etc/haproxy/haproxy.pem
    #use_backend static if { hdr_beg(host) -i img }
    use_backend static2 if { path_end -i .php }
    default_backend static1

# The static backend backend for 'Host: img', /img and /css.
backend static1
    balance roundrobin
    server statsrv1 172.25.26.3:80 check inter 1000
backend static2
    balance roundrobin
    server statsrv2 172.25.26.4:80 check inter 1000
[root@server1 haproxy]# /etc/init.d/haproxy reload

Server3:

[root@server3 ~]# yum install -y php
[root@server3 ~]# cd /var/www/html
[root@server3 html]# ls
[root@server3 html]# vim index.php
<?php
phpinfo();
?>
[root@server3 html]# /etc/init.d/httpd restart
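Dynamic/static separation can then be verified from a client: plain pages should come from static1 (server2) and .php requests from static2 (server3). A quick sketch:

curl -s http://172.25.26.2/index.html            # expect static1 (server2)
curl -s http://172.25.26.2/index.php | head -3   # expect phpinfo output from static2 (server3)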

Server1:

[root@server1 haproxy]# vim haproxy.cfg
    #use_backend static if { path_beg /img /css }
    acl blacklist src 172.25.26.250
    http-request deny if blacklist
    use_backend static2 if { path_end -i .php }
    default_backend static1
[root@server1 haproxy]# /etc/init.d/haproxy reload
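Requests from 172.25.26.250 should now be denied while other clients are unaffected; a quick check from the blacklisted host:

# run on 172.25.26.250; the status line should report 403 Forbidden
curl -sI http://172.25.26.2/ | head -1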

Server1:

[root@server1 haproxy]# vim haproxy.cfg
    acl blacklist src 172.25.26.250
    http-request deny if blacklist
    errorloc 403 http://172.25.26.2:8080
[root@server1 haproxy]# /etc/init.d/haproxy reload

[root@server1 haproxy]# vim haproxy.cfg
    acl blacklist src 172.25.26.250
    http-request deny if blacklist
    errorloc 403 http://172.25.26.2:8080
    use_backend static2 if { path_end -i .php }
[root@server1 haproxy]# /etc/init.d/haproxy reload
[root@server1 haproxy]# yum install httpd
[root@server1 haproxy]# vim /etc/httpd/conf/httpd.conf
[root@server1 haproxy]# /etc/init.d/httpd restart
[root@server1 haproxy]# cd /var/www/html
[root@server1 html]# ls
[root@server1 html]# vim index.html
[root@server1 html]# /etc/init.d/haproxy reload
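The httpd.conf edit above is not shown in the original; presumably the Listen directive is changed so Apache on server1 answers on port 8080 (the errorloc target) rather than colliding with HAProxy on port 80. The assumed change:

# /etc/httpd/conf/httpd.conf -- assumed edit
Listen 8080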

Server1:

[root@server1 haproxy]# vim haproxy.cfg
    acl blacklist src 172.25.26.250
    #http-request deny if blacklist
    #errorloc 403 http://172.25.26.2:8080
    redirect location http://172.25.26.2:8080 if blacklist
    use_backend static2 if { path_end -i .php }

[root@server1 haproxy]# /etc/init.d/haproxy reload

Server1:

[root@server1 haproxy]# vim haproxy.cfg
    #http-request deny if blacklist
    #errorloc 403 http://172.25.26.2:8080
    redirect location http://172.25.26.4
    use_backend static2 if { path_end -i .php }
    default_backend static1
[root@server1 haproxy]# /etc/init.d/haproxy reload

Server1:

[root@server1 haproxy]# vim haproxy.cfg
    acl blacklist src 172.25.26.250
    #http-request deny if blacklist
    #errorloc 403 http://172.25.26.2:8080
    #redirect location

[root@server1 haproxy]# /etc/init.d/haproxy reload

Server1:

[root@server1 haproxy]# vim haproxy.cfg
    #use_backend static if { path_beg /img /css }
    acl blacklist src 172.25.26.250
    acl write method POST
    acl write method PUT
    #http-request deny if blacklist
    #errorloc 403 http://172.25.26.2:8080
    #redirect location
    use_backend static2 if { path_end -i .php }
    use_backend static2 if write
    default_backend static1
[root@server1 haproxy]# /etc/init.d/haproxy reload
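Since the two acl lines share the name write, they are OR'd: a POST or PUT counts as a write and is routed to static2 (server3), while reads keep going to static1 (server2). A hedged check from a client:

curl -s http://172.25.26.2/                            # GET: expect static1 (server2)
curl -s -X POST -d x=1 http://172.25.26.2/index.php    # POST: expect static2 (server3)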

server2:

[root@server2 ~]# cd /var/www/html
[root@server2 html]# ls
index.html
[root@server2 html]# mkdir images
[root@server2 html]# cd images/
[root@server2 images]# pwd
/var/www/html/images
[root@server2 images]# ls
redhat.jpg


Server3:

[root@server3 html]# ls
index.html  index.php  upload
[root@server3 html]# cd upload/
[root@server3 upload]# ls
index.php  upload_file.php
[root@server3 upload]# mv * ..
mv: overwrite `../index.php'? y
[root@server3 upload]# cd ..
[root@server3 html]# ls
index.html  index.php  upload  upload_file.php
[root@server3 html]# chmod 777 upload
[root@server3 html]# vim upload_file.php

Server2:

[root@server2 images]# cd /var/www/html
[root@server2 html]# ls
images  index.html  upload
[root@server2 html]# cd upload/
[root@server2 upload]# ls
index.php  upload_file.php
[root@server2 upload]# mv * ..
[root@server2 upload]# cd ..
[root@server2 html]# ls
images  index.html  index.php  upload  upload_file.php
[root@server2 html]# chmod 777 upload
[root@server2 html]# vim upload_file.php

[root@server2 html]# yum install -y php
[root@server2 html]# /etc/init.d/httpd restart

Server2:

[root@server2 html]# cd upload
[root@server2 upload]# ls

Server3:

[root@server3 html]# cd upload
[root@server3 upload]# ls
redhat.jpg

The uploaded image lands only on server3: the upload was a POST, so HAProxy routed it to static2, while server2's upload directory stays empty.

(2) Pacemaker configuration (server1 and server4)

Server1:

[root@server1 haproxy]# yum install -y pacemaker corosync

Server4:

[root@server4 yum.repos.d]# yum install -y pacemaker corosync
[root@server4 yum.repos.d]# cd /etc/corosync/
[root@server4 corosync]# ls
corosync.conf.example  corosync.conf.example.udpu  service.d  uidgid.d
[root@server4 corosync]# cp corosync.conf.example corosync.conf
[root@server4 corosync]# vim corosync.conf
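The corosync.conf edits are not reproduced in the original; a minimal sketch for this two-node setup (bindnetaddr taken from the lab network, the multicast values assumed) sets the totem interface and loads pacemaker as a corosync plugin, which matches the "classic openais (with plugin)" infrastructure that crm reports later:

totem {
    version: 2
    secauth: off
    interface {
        ringnumber: 0
        bindnetaddr: 172.25.26.0
        mcastaddr: 226.94.1.1
        mcastport: 5405
        ttl: 1
    }
}
service {
    # start pacemaker from corosync (plugin mode)
    name: pacemaker
    ver: 0
}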

[root@server4 corosync]# scp corosync.conf server1:/etc/corosync/
[root@server4 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync):               [  OK  ]

Server1:

[root@server1 haproxy]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync):               [  OK  ]
[root@server1 haproxy]# cd /root
[root@server1 ~]# ls
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm  haproxy-1.6.11.tar.gz  rpmbuild
haproxy-1.6.11                    pssh-2.3.1-2.1.x86_64.rpm
[root@server1 ~]# yum install crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm

Server4:

[root@server4 ~]# crm
-bash: crm: command not found
[root@server4 ~]# ls
anaconda-ks.cfg                   install.log          pssh-2.3.1-2.1.x86_64.rpm
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm  install.log.syslog
[root@server4 ~]# rm -fr anaconda-ks.cfg install.log install.log.syslog
[root@server4 ~]# ls
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm  pssh-2.3.1-2.1.x86_64.rpm
[root@server4 ~]# yum install crmsh-1.2.6-0.rc2.2.1.x86_64.rpm pssh-2.3.1-2.1.x86_64.rpm
[root@server4 ~]# scp * server1:
root@server1's password:
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm          100%  483KB 483.4KB/s   00:00
pssh-2.3.1-2.1.x86_64.rpm                 100%   49KB  49.1KB/s   00:00

Server1:

[root@server1 ~]# crm_mon
Connection to the CIB terminated

Server4:

No fence device is configured yet, so STONITH is disabled for now (it is re-enabled once fence_xvm is set up below):

[root@server4 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
property $id="cib-bootstrap-options" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)" \
        expected-quorum-votes="2"
crm(live)configure# property stonith-enabled=false
crm(live)configure# commit

Server1:

Reconnecting...
[root@server1 ~]# crm_verify -VL

Server4:

crm(live)configure# show
node server1
node server4
property $id="cib-bootstrap-options" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)" \
        expected-quorum-votes="2" \
        stonith-enabled="false"

Add the VIP 172.25.26.100:

crm(live)configure# primitive vip ocf:heartbeat:IPaddr2 params ip=172.25.26.100 cidr_netmask=24 op monitor interval=1min

crm(live)configure# commit
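After the commit, the VIP should be up on whichever node currently holds the resource; a quick check there:

[root@server1 ~]# ip addr | grep 172.25.26.100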

Server1:

[root@server1 ~]# crm_mon
Connection to the CIB terminated
[root@server1 ~]# /etc/init.d/corosync stop
Signaling Corosync Cluster Engine (corosync) to terminate:   [  OK  ]
Waiting for corosync services to unload:..                   [  OK  ]
[root@server1 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync):                 [  OK  ]

Server4:

[root@server4 ~]# crm_mon
Connection to the CIB terminated

Server1:

Stopping corosync on server1 removed quorum in this two-node cluster, and with the default policy the surviving node stops all resources. Setting no-quorum-policy=ignore lets resources keep running on the remaining node:

[root@server1 ~]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
primitive vip ocf:heartbeat:IPaddr2 \
        params ip="172.25.26.100" cidr_netmask="24" \
        op monitor interval="1min"
property $id="cib-bootstrap-options" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)" \
        expected-quorum-votes="2" \
        stonith-enabled="false"
crm(live)configure# property no-quorum-policy=ignore
crm(live)configure# commit

Server4:

[root@server4 ~]# crm_mon
Connection to the CIB terminated

Server1:

[root@server1 ~]# /etc/init.d/corosync stop
Signaling Corosync Cluster Engine (corosync) to terminate:   [  OK  ]
Waiting for corosync services to unload:.                    [  OK  ]
[root@server1 ~]# /etc/init.d/corosync start
Starting Corosync Cluster Engine (corosync):                 [  OK  ]
[root@server1 ~]# cd /root/rpmbuild/RPMS/x86_64/
[root@server1 x86_64]# ls
haproxy-1.6.11-1.x86_64.rpm
[root@server1 x86_64]# scp haproxy-1.6.11-1.x86_64.rpm server4:

Server4:

Reconnecting...
[root@server4 ~]# cd /root
[root@server4 ~]# ls
crmsh-1.2.6-0.rc2.2.1.x86_64.rpm  haproxy-1.6.11-1.x86_64.rpm  pssh-2.3.1-2.1.x86_64.rpm
[root@server4 ~]# rpm -ivh haproxy-1.6.11-1.x86_64.rpm
Preparing...                ########################################### [100%]
   1:haproxy                ########################################### [100%]
[root@server4 ~]# /etc/init.d/haproxy start
Starting haproxy:                                            [  OK  ]

Server1:

haproxy is added as an LSB resource, managed through its /etc/init.d script:

[root@server1 haproxy]# crm
crm(live)# configure
crm(live)configure# show
node server1
node server4
primitive vip ocf:heartbeat:IPaddr2 \
        params ip="172.25.26.100" cidr_netmask="24" \
        op monitor interval="1min"
property $id="cib-bootstrap-options" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)" \
        expected-quorum-votes="2" \
        stonith-enabled="false" \
        no-quorum-policy="ignore"
crm(live)configure# primitive haproxy lsb:haproxy op monitor interval=1min
crm(live)configure# commit

Server4:

[root@server4 ~]# crm_mon
Connection to the CIB terminated

Server1:

Grouping vip and haproxy forces them to run on the same node and start in order (vip first):

[root@server1 haproxy]# crm
crm(live)# configure
crm(live)configure# group hagroup vip haproxy
crm(live)configure# commit

Server4:

To test failover, server4 is put into standby, which forces the hagroup resources over to the other node; crm node online below brings it back:

[root@server4 ~]# crm_mon
Connection to the CIB terminated
Reconnecting...
[root@server4 ~]# crm node standby
[root@server4 ~]# crm
crm(live)# bye

Server1:

[root@server1 haproxy]# crm_mon
Connection to the CIB terminated

Server4:

Connection to the CIB terminated
[root@server4 ~]# crm node online

Server1:

[root@server1 ~]# crm_mon

Server1:

[root@server1 haproxy]# stonith_admin -I
 fence_xvm
 fence_wti
 fence_vmware_soap
 fence_vmware_helper
 fence_vmware
 fence_virt
 fence_virsh
 fence_tool
 fence_scsi
 fence_sanbox2
 fence_rsb
 fence_rsa
[root@server1 haproxy]# cd /etc/cluster/
[root@server1 cluster]# ls
cluster.conf  cman-notify.d  fency_xvm.key
[root@server1 cluster]# crm_mon
Connection to the CIB terminated

Server4:

[root@server4 ~]# stonith_admin -I
 fence_pcmk
 fence_legacy
2 devices found
[root@server4 ~]# mkdir /etc/cluster

Only two agents are found here because fence-virt is not yet installed on server4.

Physical host:

The fence key generated on the physical host is copied to server4 (server1 already has it from the earlier RHCS experiment), and fence_virtd must be running on the host:

[root@foundation26 Desktop]# scp /etc/cluster/fency_xvm.key root@172.25.26.5:/etc/cluster/
[root@foundation26 Desktop]# systemctl status fence_virtd
● fence_virtd.service - Fence-Virt system host daemon
   Loaded: loaded (/usr/lib/systemd/system/fence_virtd.service; enabled; vendor preset: disabled)
   Active: active (running) since Sat -08-04 00:37:05 CST; 7h ago

Server4:

[root@server4 ~]# cd /etc/cluster/
[root@server4 cluster]# ls
fency_xvm.key
[root@server4 cluster]# yum provides */fence_xvm
[root@server4 cluster]# yum install fence-virt-0.2.3-15.el6.x86_64
[root@server4 cluster]# stonith_admin -I
 fence_xvm
 fence_virt
 fence_pcmk
 fence_legacy
4 devices found
[root@server4 cluster]# crm
crm(live)# configure
crm(live)configure# primitive vmfence stonith:fence_xvm params pcmk_host_map="server1:test1;server4:test4" op monitor interval=1min
crm(live)configure# commit
crm(live)configure# show
node server1
node server4 \
        attributes standby="off"
primitive haproxy lsb:haproxy \
        op monitor interval="1min"
primitive vip ocf:heartbeat:IPaddr2 \
        params ip="172.25.26.100" cidr_netmask="24" \
        op monitor interval="1min"
primitive vmfence stonith:fence_xvm \
        params pcmk_host_map="server1:test1;server4:test4" \
        op monitor interval="1min"
group hagroup vip haproxy
property $id="cib-bootstrap-options" \
        dc-version="1.1.10-14.el6-368c726" \
        cluster-infrastructure="classic openais (with plugin)" \
        expected-quorum-votes="2" \
        stonith-enabled="false" \
        no-quorum-policy="ignore"
crm(live)configure# property stonith-enabled=true
crm(live)configure# commit
crm(live)configure# bye
bye
[root@server4 cluster]# echo c > /proc/sysrq-trigger
Write failed: Broken pipe

The final echo deliberately crashes server4's kernel; with STONITH enabled, fence_xvm should power-cycle server4 and the hagroup resources fail over to server1.
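Before (or instead of) crashing a node, the fence path can also be exercised by hand; a sketch using the guest names from pcmk_host_map (test1/test4 are the hypervisor-side domain names assumed in this lab):

# List guests visible through the fence device
fence_xvm -o list
# Manually power-cycle one guest
fence_xvm -o reboot -H test4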

Server1:

[root@server1 cluster]# crm_mon
Connection to the CIB terminated
