Building a High-Availability, High-Performance JSP Cluster

1. Network Topology

A high-availability, high-performance cluster built from LVS + keepalived + nginx + tomcat

LVS-DR-MASTER: 192.168.44.129

LVS-DR-SLAVE: 192.168.44.130

LVS-VIP: 192.168.44.200

NGINX1: 192.168.44.131

NGINX2: 192.168.44.132

TOMCAT1: 192.168.44.133

TOMCAT2: 192.168.44.134

Goal: build a high-availability, high-performance JSP cluster

2. Initialize the System

System environment: CentOS 6.3 (custom install)

Component groups:

Base

Development Libraries

Development Tools

Editors

Text-based Internet

3. LVS + keepalived

echo "============================ 更新系统时间 ======================"  
yum install -y ntp  
ntpdate time.windows.com  
echo "*/5 * * * * /usr/sbin/ntpdate time.windows.com &>> /var/log/ntp.log" >> /var/spool/cron/root

echo "========================= Install ipvsadm and keepalived =================="

[root@LVS-DR-MASTER ~]# yum -y install ipvsadm keepalived

echo "======================= Configure keepalived ==========================="

[root@LVS-DR-MASTER ~]# vim /etc/keepalived/keepalived.conf

! Configuration File for keepalived

global_defs {  
  notification_email {  
[email protected]
  }  
  notification_email_from  [email protected]
  smtp_server smtp.yeah.net  
 # smtp_connect_timeout 30  
  router_id LVS_DEVEL  
}  
# VIP1  
vrrp_instance VI_1 {  
   state MASTER             # change MASTER to BACKUP on the backup server  
   interface eth0  
   lvs_sync_daemon_interface eth0  
   virtual_router_id 51  
   priority 100    # change 100 to 90 on the backup server  
   advert_int 5  
   authentication {  
       auth_type PASS  
       auth_pass 1111  
   }  
   virtual_ipaddress {  
192.168.44.200    
       # (if there is more than one VIP, list each on its own line)  
   }  
}  
virtual_server 192.168.44.200 80 {  
   delay_loop 6                  #(query realserver status every 6 seconds)  
   lb_algo wrr                   #(LVS scheduling algorithm)  
   lb_kind DR                    #(Direct Routing)  
   persistence_timeout 60        #(connections from the same client IP stay on the same realserver for 60 seconds)  
   protocol TCP                  #(use TCP to check realserver status)  
   real_server 192.168.44.131 80 {  
       weight 100               #(weight)  
       TCP_CHECK {  
       connect_timeout 10       #(time out after 10 seconds with no response)  
       nb_get_retry 3  
       delay_before_retry 3  
       connect_port 80  
       }  
   }  
   real_server 192.168.44.132 80 {  
       weight 100  
       TCP_CHECK {  
       connect_timeout 10  
       nb_get_retry 3  
       delay_before_retry 3  
       connect_port 80  
       }  
    }  
}

[root@LVS-DR-MASTER ~]# /etc/init.d/keepalived start        # use "stop" to shut it down

[root@LVS-DR-MASTER ~]# chkconfig keepalived on
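
On LVS-DR-SLAVE the same keepalived.conf is reused with only the two changes called out in the comments above. A minimal sketch of the differing lines, assuming the same interface and virtual_router_id:

# /etc/keepalived/keepalived.conf on LVS-DR-SLAVE -- only the lines that differ
vrrp_instance VI_1 {
   state BACKUP              # MASTER on the primary, BACKUP here
   priority 90               # lower than the MASTER's 100
   # all other settings are identical to the MASTER configuration
}

[root@LVS-DR-SLAVE ~]# /etc/init.d/keepalived start
[root@LVS-DR-SLAVE ~]# chkconfig keepalived on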

echo "================== Configure the realservers (on the NGINX servers) ================="

[root@NGINX1 ~]# vim /root/lvs_real.sh

#!/bin/bash  
# description: Config realserver  
SNS_VIP=192.168.44.200  
source /etc/rc.d/init.d/functions  
case "$1" in  
start)  
      /sbin/ifconfig lo:0 $SNS_VIP netmask 255.255.255.255 broadcast $SNS_VIP  
      /sbin/route add -host $SNS_VIP dev lo:0  
      echo "1" >/proc/sys/net/ipv4/conf/lo/arp_ignore  
      echo "2" >/proc/sys/net/ipv4/conf/lo/arp_announce  
      echo "1" >/proc/sys/net/ipv4/conf/all/arp_ignore  
      echo "2" >/proc/sys/net/ipv4/conf/all/arp_announce  
      sysctl -p >/dev/null 2>&1  
      echo "RealServer Start OK"  
       ;;

stop)  
      /sbin/ifconfig lo:0 down  
      /sbin/route del $SNS_VIP >/dev/null 2>&1  
      echo "0" >/proc/sys/net/ipv4/conf/lo/arp_ignore  
      echo "0" >/proc/sys/net/ipv4/conf/lo/arp_announce  
      echo "0" >/proc/sys/net/ipv4/conf/all/arp_ignore  
      echo "0" >/proc/sys/net/ipv4/conf/all/arp_announce  
      echo "RealServer Stoped"  
      ;;  
*)  
      echo "Usage: $0 {start|stop}"  
      exit 1  
esac  
exit 0

[root@NGINX1 ~]# sh /root/lvs_real.sh start

[root@NGINX1 ~]# ifconfig lo:0  
lo:0      Link encap:Local Loopback  
         inet addr:192.168.44.200  Mask:255.255.255.255  
         UP LOOPBACK RUNNING  MTU:16436  Metric:1

[root@NGINX1 ~]# echo "sh /root/lvs_real.sh start &>> /dev/null" >> /etc/rc.local
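
NGINX2 is also a realserver, so the same VIP binding and ARP suppression have to be applied there. A sketch, assuming root SSH access between the hosts:

[root@NGINX1 ~]# scp /root/lvs_real.sh 192.168.44.132:/root/
[root@NGINX2 ~]# sh /root/lvs_real.sh start
[root@NGINX2 ~]# echo "sh /root/lvs_real.sh start &>> /dev/null" >> /etc/rc.local
# optionally confirm the ARP settings took effect
[root@NGINX2 ~]# sysctl net.ipv4.conf.all.arp_ignore net.ipv4.conf.all.arp_announce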

echo "===================== Test LVS + keepalived ========================"

#Start keepalived on LVS-DR-MASTER and LVS-DR-SLAVE; LVS-DR-MASTER binds the VIP first

LVS-DR-MASTER:

[root@LVS-DR-MASTER ~]# ip add  
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN  
   link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00  
   inet 127.0.0.1/8 scope host lo  
   inet6 ::1/128 scope host  
      valid_lft forever preferred_lft forever  
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000  
   link/ether 00:0c:29:25:b2:e1 brd ff:ff:ff:ff:ff:ff  
   inet 192.168.44.129/24 brd 192.168.44.255 scope global eth0  
   inet 192.168.44.200/32 scope global eth0  
   inet6 fe80::20c:29ff:fe25:b2e1/64 scope link  
      valid_lft forever preferred_lft forever

LVS-DR-SLAVE:

[root@LVS-DR-SLAVE ~]# ip add  
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN  
   link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00  
   inet 127.0.0.1/8 scope host lo  
   inet6 ::1/128 scope host  
      valid_lft forever preferred_lft forever  
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000  
   link/ether 00:0c:29:cc:49:40 brd ff:ff:ff:ff:ff:ff  
   inet 192.168.44.130/24 brd 192.168.44.255 scope global eth0  
   inet6 fe80::20c:29ff:fecc:4940/64 scope link  
      valid_lft forever preferred_lft forever

#Resolve the domain name, test access, and check LVS forwarding

[root@LVS-DR-MASTER ~]# ipvsadm -ln  
IP Virtual Server version 1.2.1 (size=4096)  
Prot LocalAddress:Port Scheduler Flags  
 -> RemoteAddress:Port           Forward Weight ActiveConn InActConn  
TCP  192.168.44.200:80 wrr persistent 60  
 -> 192.168.44.131:80           Route   100    0          0  
 -> 192.168.44.132:80           Route   100    8216       20005
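
To drive some traffic through the VIP and watch the counters above change, a simple hedged loop from a client machine (not from the realservers themselves) is enough; note that persistence_timeout 60 plus nginx's ip_hash means a single client will stick to one realserver:

for i in $(seq 1 100); do
    curl -s -o /dev/null -w "%{http_code}\n" http://192.168.44.200/
done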

#Test shutting down LVS-DR-MASTER: after a brief period of packet loss, LVS-DR-SLAVE takes over immediately

LVS-DR-SLAVE takes over from LVS-DR-MASTER and binds the VIP:

[root@LVS-DR-SLAVE ~]# ip add  
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN  
   link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00  
   inet 127.0.0.1/8 scope host lo  
   inet6 ::1/128 scope host  
      valid_lft forever preferred_lft forever  
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000  
   link/ether 00:0c:29:cc:49:40 brd ff:ff:ff:ff:ff:ff  
   inet 192.168.44.130/24 brd 192.168.44.255 scope global eth0  
   inet 192.168.44.200/32 scope global eth0  
   inet6 fe80::20c:29ff:fecc:4940/64 scope link  
      valid_lft forever preferred_lft forever

Once LVS-DR-MASTER finishes rebooting, it automatically takes back the VIP and resumes forwarding:

[root@LVS-DR-MASTER ~]# ip add                      
1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN  
   link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00  
   inet 127.0.0.1/8 scope host lo  
   inet6 ::1/128 scope host  
      valid_lft forever preferred_lft forever  
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000  
   link/ether 00:0c:29:25:b2:e1 brd ff:ff:ff:ff:ff:ff  
   inet 192.168.44.129/24 brd 192.168.44.255 scope global eth0  
   inet 192.168.44.200/32 scope global eth0  
   inet6 fe80::20c:29ff:fe25:b2e1/64 scope link  
      valid_lft forever preferred_lft forever

#Test shutting down one of the REALSERVERs

[root@NGINX1 ~]# nginx -s stop

[root@LVS-DR-MASTER ~]# ipvsadm -ln  
IP Virtual Server version 1.2.1 (size=4096)  
Prot LocalAddress:Port Scheduler Flags  
 -> RemoteAddress:Port           Forward Weight ActiveConn InActConn  
TCP  192.168.44.200:80 wrr persistent 60  
 -> 192.168.44.132:80           Route   100    8383       4671

The tests above show that when a REALSERVER fails or stops serving, the load balancer's health check automatically removes the failed machine from the forwarding table, isolating the fault so that user access is unaffected.
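
To watch the failed realserver being removed and later re-added in real time, the IPVS table can be polled continuously while the back end is stopped and started:

[root@LVS-DR-MASTER ~]# watch -n 1 ipvsadm -ln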

#Restart the stopped REALSERVER

[root@LVS-DR-MASTER ~]# ipvsadm -ln  
IP Virtual Server version 1.2.1 (size=4096)  
Prot LocalAddress:Port Scheduler Flags  
 -> RemoteAddress:Port           Forward Weight ActiveConn InActConn  
TCP  192.168.44.200:80 wrr persistent 60  
 -> 192.168.44.131:80           Route   100    0          0          
 -> 192.168.44.132:80           Route   100    0          22636

Once a failed REALSERVER recovers, the load balancer's health check automatically adds it back to the forwarding table.

4. Integrating Nginx and Tomcat

#Configure NGINX1 and NGINX2 (the Tomcat/JDK step below applies to the back ends, TOMCAT1 and TOMCAT2)

#================================ 1. Install Tomcat and the JDK =================================

#yum -y install apache-tomcat jdk pcre tomcat-native apr apr-util

# Check the Java version  
java -version

# Start Tomcat  
#/etc/init.d/tomcat start

#chkconfig tomcat on

# Browse to http://localhost:8080; if the Tomcat welcome page appears, Tomcat and the JDK were installed successfully
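
If no browser is available on the host, the same check can be done from the shell; an HTTP 200 with Tomcat's default page means the container is serving:

curl -I http://localhost:8080/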

#============================= 2. Install Nginx =============================

#yum -y install nginx

# Edit the main NGINX configuration file

#vim /etc/nginx/nginx.conf

worker_processes  10;

events {
    use epoll;
    worker_connections  65550;
}

http {
    include       mime.types;
    default_type  application/octet-stream;
    sendfile        on;
    keepalive_timeout  65;

    upstream tomcat1 {
        ip_hash;
        server  192.168.44.133:8080;
        server  192.168.44.134:8080;
    }

    upstream tomcat2 {
        ip_hash;
        server  192.168.44.133:8080;
        server  192.168.44.134:8080;
    }

    include vhost/*.conf;
}
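
The trailing include expects /etc/nginx/vhost/ to exist; if the nginx package did not create it, create it before adding the per-site files below:

[root@NGINX1 ~]# mkdir -p /etc/nginx/vhost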

# Edit the NGINX virtual-host configuration files

[root@NGINX1 ~]# cat /etc/nginx/vhost/tomcat1.conf

server {
    listen 80;
    server_name 192.168.44.133;
    charset utf-8;

    location / {
        root   tomcat1;
        index  index.html index.htm index.jsp;
        proxy_pass http://tomcat1/;
    }
}

[root@NGINX1 ~]# cat /etc/nginx/vhost/tomcat2.conf

server {
    listen 80;
    server_name 192.168.44.134;
    charset utf-8;

    location / {
        root   tomcat2;
        index  index.html index.htm index.jsp;
        proxy_pass http://tomcat2/;
    }
}

# Check the configuration file syntax  
[root@NGINX1 ~]# /etc/init.d/nginx configtest

nginx: the configuration file /etc/nginx/nginx.conf syntax is ok

nginx: configuration file /etc/nginx/nginx.conf test is successful

[root@NGINX1 ~]# /etc/init.d/nginx start

Starting nginx:                                           [  OK  ]

# Get the nginx master process ID  
#ps -ef | grep "nginx: master process" | grep -v "grep" | awk -F ' ' '{print $2}'

# Stop nginx  
[root@NGINX1 ~]# /etc/init.d/nginx stop

Stopping nginx:                                            [  OK  ]

# Graceful reload  
[root@NGINX1 ~]# /etc/init.d/nginx reload
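
The reload action is equivalent to sending SIGHUP to the master process found with the ps pipeline shown above; a sketch, reusing that pipeline:

kill -HUP $(ps -ef | grep "nginx: master process" | grep -v "grep" | awk '{print $2}')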

5. Set Up Test Pages and Verify

TOMCAT1:

[root@TOMCAT1 ~]# vim /usr/local/tomcat/webapps/tomcat1/index.jsp  
<html><body><center>

TOMCAT1 Now time is: <%=new java.util.Date()%>

</center></body></html>

[root@TOMCAT1 ~]# vim /usr/local/tomcat/webapps/tomcat1/WEB-INF/web.xml  
<?xml version="1.0" encoding="ISO-8859-1"?>

<!DOCTYPE web-app  
PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"  
"http://java.sun.com/dtd/web-app_2_3.dtd">

<web-app>  
<display-name>My Web Application</display-name>  
<description>  
A application for test.  
</description>  
</web-app>

#TOMCAT1 test page result
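
The test page can also be fetched from the shell, directly against Tomcat; the /tomcat1/ path assumes Tomcat's standard auto-deploy of the webapps/tomcat1 directory:

curl http://192.168.44.133:8080/tomcat1/index.jsp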

TOMCAT2:

[root@TOMCAT2 ~]# vim /usr/local/tomcat/webapps/tomcat2/index.jsp

<html><body><center>

TOMCAT2 Now time is: <%=new java.util.Date()%>

</center></body></html>

[root@TOMCAT2 ~]# vim /usr/local/tomcat/webapps/tomcat2/WEB-INF/web.xml

<?xml version="1.0" encoding="ISO-8859-1"?>

<!DOCTYPE web-app  
PUBLIC "-//Sun Microsystems, Inc.//DTD Web Application 2.3//EN"  
"http://java.sun.com/dtd/web-app_2_3.dtd">

<web-app>  
<display-name>My Web Application</display-name>  
<description>  
A application for test.  
</description>  
</web-app>

#TOMCAT2 test page result
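
As a final end-to-end check through the whole chain (client → LVS VIP → nginx → Tomcat), the test page can be requested through the VIP from a client machine. Because of ip_hash and the 60-second LVS persistence, a single client keeps seeing the same TOMCATx page, while different clients should be spread across both back ends; the URI below assumes the same /tomcat1/ auto-deploy path as above:

curl http://192.168.44.200/tomcat1/index.jsp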

#================================END =================================
