Outline
I. Preface
II. Environment Preparation
III. Load Balancing Implementation
IV. DeltaManager Implementation
V. Tomcat Load Balancing with Nginx
Note: the OS is CentOS 6.4 x86_64. All software used in this post can be downloaded here: http://yunpan.cn/QGBCLwrZnpLMS.
I. Preface
Everything in this post builds on the previous one; if anything is unclear, read that post first: http://freeloda.blog.51cto.com/2033581/1301382. In the previous post we used mod_proxy and mod_jk to reverse-proxy a Tomcat server; in this one we will implement Tomcat load balancing and session sharing across a Tomcat cluster. Let's get into the details!
II. Environment Preparation
1. Lab topology
2. Synchronize time on all nodes
[root@apache ~]# ntpdate 202.120.2.101
[root@tomcat1 ~]# ntpdate 202.120.2.101
[root@tomcat2 ~]# ntpdate 202.120.2.101
3. Install the JDK
[root@tomcat2 src]# rpm -ivh jdk-7u40-linux-x64.rpm
4. Configure JDK environment variables
[root@tomcat2 java]# vim /etc/profile.d/java.sh
export JAVA_HOME=/usr/java/jdk1.7.0_40
export PATH=$PATH:$JAVA_HOME/bin
[root@tomcat2 java]# java -version
java version "1.7.0_40"
Java(TM) SE Runtime Environment (build 1.7.0_40-b43)
Java HotSpot(TM) 64-Bit Server VM (build 24.0-b56, mixed mode)
5. Install Tomcat
[root@tomcat2 src]# tar xf apache-tomcat-7.0.42.tar.gz -C /usr/local/
[root@tomcat2 src]# cd /usr/local/
[root@tomcat2 local]# ln -sv apache-tomcat-7.0.42 tomcat
"tomcat" -> "apache-tomcat-7.0.42"
[root@tomcat2 local]# cd tomcat/
[root@tomcat2 tomcat]# ls
bin  conf  lib  LICENSE  logs  NOTICE  RELEASE-NOTES  RUNNING.txt  temp  webapps  work
6. Configure Tomcat environment variables
[root@tomcat2 tomcat]# vim /etc/profile.d/tomcat.sh
export CATALINA_HOME=/usr/local/tomcat
export PATH=$PATH:$CATALINA_HOME/bin
[root@tomcat2 tomcat]# . /etc/profile.d/tomcat.sh
[root@tomcat2 tomcat]# catalina.sh version
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr/java/jdk1.7.0_40
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Server version: Apache Tomcat/7.0.42
Server built:   Jul 2 2013 08:57:41
Server number:  7.0.42.0
OS Name:        Linux
OS Version:     2.6.32-358.el6.x86_64
Architecture:   amd64
JVM Version:    1.7.0_40-b43
JVM Vendor:     Oracle Corporation
7. Provide an init script for Tomcat
[root@tomcat2 tomcat]# vim /etc/init.d/tomcat
#!/bin/sh
# Tomcat init script for Linux.
#
# chkconfig: 2345 96 14
# description: The Apache Tomcat servlet/JSP container.
CATALINA_HOME=/usr/local/tomcat
export CATALINA_HOME
# export CATALINA_OPTS="-Xms128m -Xmx256m"
exec $CATALINA_HOME/bin/catalina.sh $*
[root@tomcat2 tomcat]# service tomcat start
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat2 tomcat]# netstat -ntulp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address               Foreign Address   State    PID/Program name
tcp        0      0 0.0.0.0:22                  0.0.0.0:*         LISTEN   1042/sshd
tcp        0      0 127.0.0.1:25                0.0.0.0:*         LISTEN   1119/master
tcp        0      0 127.0.0.1:6010              0.0.0.0:*         LISTEN   1153/sshd
tcp        0      0 127.0.0.1:6011              0.0.0.0:*         LISTEN   1262/sshd
tcp        0      0 :::8080                     :::*              LISTEN   1541/java
tcp        0      0 :::22                       :::*              LISTEN   1042/sshd
tcp        0      0 ::1:25                      :::*              LISTEN   1119/master
tcp        0      0 ::1:6010                    :::*              LISTEN   1153/sshd
tcp        0      0 ::1:6011                    :::*              LISTEN   1262/sshd
tcp        0      0 ::ffff:127.0.0.1:8005       :::*              LISTEN   1541/java
tcp        0      0 :::8009                     :::*              LISTEN   1541/java
8. Test it
As you can see, access succeeds, which means our Tomcat installation is complete. Next, let's configure it.
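The same check can be run from the command line; a quick sketch, assuming tomcat2's IP is 192.168.18.202 as in the topology (an HTTP/1.1 200 response means the default page is being served):

# Hypothetical spot check against the freshly installed instance
curl -I http://192.168.18.202:8080/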
9. Modify the configuration file
[root@tomcat2 ~]# vim /usr/local/tomcat/conf/server.xml
# Change the default port to 80
# Set the default host to www.test.com and add a jvmRoute
# Add a virtual host
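Pieced together, the three changes above would look roughly like this; a sketch only, with the surrounding server.xml elements elided and the Context assumed to point at the /web/webapp directory created in the next step:

<!-- Sketch of the server.xml changes on tomcat2; surrounding elements elided -->
<Connector port="80" protocol="HTTP/1.1" connectionTimeout="20000" redirectPort="8443"/>
<Engine name="Catalina" defaultHost="www.test.com" jvmRoute="TomcatB">
  <Host name="www.test.com" appBase="webapps" unpackWARs="true" autoDeploy="true">
    <Context path="" docBase="/web/webapp" reloadable="true"/>
  </Host>
</Engine>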
10. Add a document directory and test file
[root@tomcat2 ~]# mkdir -pv /web/webapp
mkdir: created directory "/web"
mkdir: created directory "/web/webapp"
[root@tomcat2 ~]# cd /web/webapp/
[root@tomcat2 webapp]# vim index.jsp
<%@ page language="java" %>
<html>
  <head><title>TomcatB</title></head>
  <body>
    <h1><font color="blue">TomcatB</font></h1>
    <table align="center" border="1">
      <tr>
        <td>Session ID</td>
        <% session.setAttribute("abc", "abc"); %>
        <td><%= session.getId() %></td>
      </tr>
      <tr>
        <td>Created on</td>
        <td><%= session.getCreationTime() %></td>
      </tr>
    </table>
  </body>
</html>
11. Check the configuration and restart
[root@tomcat2 ~]# service tomcat stop
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat2 ~]# service tomcat configtest
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Sep 25, 2013 2:04:12 PM org.apache.catalina.core.AprLifecycleListener init
INFO: The APR based Apache Tomcat Native library which allows optimal performance in production environments was not found on the java.library.path: /usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
Sep 25, 2013 2:04:13 PM org.apache.coyote.AbstractProtocol init
INFO: Initializing ProtocolHandler ["http-bio-80"]
Sep 25, 2013 2:04:13 PM org.apache.coyote.AbstractProtocol init
INFO: Initializing ProtocolHandler ["ajp-bio-8009"]
Sep 25, 2013 2:04:13 PM org.apache.catalina.startup.Catalina load
INFO: Initialization processed in 1360 ms
[root@tomcat2 ~]# service tomcat start
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat2 ~]# netstat -ntulp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address               Foreign Address   State    PID/Program name
tcp        0      0 0.0.0.0:22                  0.0.0.0:*         LISTEN   1042/sshd
tcp        0      0 127.0.0.1:25                0.0.0.0:*         LISTEN   1119/master
tcp        0      0 127.0.0.1:6010              0.0.0.0:*         LISTEN   1153/sshd
tcp        0      0 127.0.0.1:6011              0.0.0.0:*         LISTEN   1262/sshd
tcp        0      0 :::80                       :::*              LISTEN   1667/java
tcp        0      0 :::22                       :::*              LISTEN   1042/sshd
tcp        0      0 ::1:25                      :::*              LISTEN   1119/master
tcp        0      0 ::1:6010                    :::*              LISTEN   1153/sshd
tcp        0      0 ::1:6011                    :::*              LISTEN   1262/sshd
tcp        0      0 ::ffff:127.0.0.1:8005       :::*              LISTEN   1667/java
tcp        0      0 :::8009                     :::*              LISTEN   1667/java
12. Test access again
With that, the preparation work is complete, and we can move on to configuring Tomcat load balancing. One more thing needs explaining: some readers will ask why only tomcat2 is configured here when the topology has three machines. This post continues from the previous one, where the other two machines were already set up; if anything is unclear, see http://freeloda.blog.51cto.com/2033581/1301382. I won't repeat it here.
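A command-line spot check of both backends works too; a sketch, assuming the two Tomcat IPs from the topology are reachable and each serves its own test page:

# Hypothetical: each instance should return its own page (TomcatA / TomcatB)
curl http://192.168.18.201/index.jsp
curl http://192.168.18.202/index.jsp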
III. Load Balancing Implementation
Either of two approaches can implement load balancing:
Load balancing based on the mod_proxy module
Load balancing based on the mod_jk module
Let's demonstrate each in turn.
1. Load balancing with mod_proxy
First, modify the httpd.conf configuration file:
[root@apache ~]# vim /etc/httpd/httpd.conf
# Enable (include) the httpd-proxy.conf configuration file
Next, modify the httpd-proxy.conf configuration file:
[root@apache ~]# vim /etc/httpd/extra/httpd-proxy.conf
ProxyRequests Off
<proxy balancer://lbcluster>
  BalancerMember ajp://192.168.18.201:8009 loadfactor=1 route=TomcatA
  BalancerMember ajp://192.168.18.202:8009 loadfactor=1 route=TomcatB
  ProxySet lbmethod=bytraffic
</proxy>
<VirtualHost *:80>
  ServerAdmin admin@test.com
  ServerName www.test.com
  ProxyPass / balancer://lbcluster/ stickysession=JSESSIONID|jsessionid nofailover=On
  ProxyPassReverse / balancer://lbcluster/
  <Proxy *>
    Require all granted
  </Proxy>
  <Location />
    Require all granted
  </Location>
</VirtualHost>
Note: about the Apache directives used above:
ProxyPreserveHost {On|Off}: when enabled, the proxy passes the Host: header from the client's request to the backend server instead of the server address given in ProxyPass. Enable this if the reverse proxy needs to support virtual hosts; otherwise leave it off.
ProxyVia {On|Off|Full|Block}: controls the use of the Via: header, mainly to steer request flow through chains of proxies. The default, Off, disables it; On adds a Via: line to every request and response; Full additionally appends the current Apache server's version to each Via: line; Block removes the Via: header from every proxied request.
ProxyRequests {On|Off}: whether to enable Apache's forward-proxy function; proxying the HTTP protocol requires the mod_proxy_http module. If ProxyPass is configured, ProxyRequests must be set to Off.
ProxyPass [path] !|url [key=value key=value ...]: associates a URL on a backend server with a virtual path on the current server; path is the local virtual path and url is the URL path on the backend. ProxyRequests must be Off when this directive is used. Note that if path ends with "/", the corresponding url must also end with "/", and vice versa.
In addition, since httpd 2.1 the mod_proxy module supports connection pooling to backend servers: connections are created on demand and can be kept in the pool for later reuse. Pool size and other settings can be given as key=value pairs on the ProxyPass directive. Common keys are listed here, with an example after this list:
min: the minimum capacity of the connection pool; this is unrelated to the actual number of connections and only sets the minimum space the pool initializes.
max: the maximum capacity of the connection pool; each MPM has its own pool, so the value depends on the MPM: for prefork it is always 1, for the others it depends on the ThreadsPerChild directive.
loadfactor: used in load-balancing configurations to define the weight of the corresponding backend server, in the range 1-100.
retry: how many seconds Apache waits before retrying after getting an error response from a backend server.
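A hypothetical ProxyPass line combining these keys might look like this (the path and backend URL are made up for illustration):

# Hypothetical: pool sizing and retry interval as key=value pairs
ProxyPass /app http://192.168.18.201:8080/app min=0 max=20 retry=5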
When the Proxy target starts with balancer:// (i.e., it is a load-balancing cluster), some additional parameters are accepted:
lbmethod: the scheduling method Apache uses for load balancing. The default is byrequests, which schedules by weighted request counting; bytraffic performs weighted traffic-byte counting; bybusyness schedules by considering each backend server's current load.
maxattempts: the number of failover attempts before giving up on a request; the default is 1, and it should not exceed the total number of nodes.
nofailover: On or Off; set it to On when a backend server failure would break the user's session, i.e., when the backend servers do not support session replication.
stickysession: the name of the balancer's sticky-session cookie; depending on the web application's language it is JSESSIONID or PHPSESSIONID.
Besides being set inside balancer:// or on ProxyPass, these parameters can also be set directly with the ProxySet directive, for example:
<Proxy balancer://hotcluster>
  BalancerMember http://www1.test.com:8080 loadfactor=1
  BalancerMember http://www2.test.com:8080 loadfactor=2
  ProxySet lbmethod=bytraffic
</Proxy>
ProxyPassReverse: makes Apache adjust the URLs in the Location, Content-Location, and URI headers of HTTP redirect responses; in a reverse-proxy environment this directive is required so that redirect responses do not bypass the proxy server.
That covers the configuration directives. Next, check the configuration and restart httpd:
[root@apache ~]# /usr/local/apache/bin/httpd -t
Syntax OK
[root@apache ~]# service httpd restart
Stopping httpd:                                            [  OK  ]
Starting httpd:                                            [  OK  ]
Now let's test it.
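One way to watch the balancer alternate between the two backends is to request the test page repeatedly; a sketch, assuming www.test.com resolves to the Apache host:

# Hypothetical test loop; each test page prints its own TomcatA/TomcatB marker
for i in 1 2 3 4; do curl -s http://www.test.com/index.jsp | grep -o 'Tomcat[AB]' | head -1; done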
As you can see, Tomcat load balancing based on the mod_proxy module is configured. Next, let's set up load balancing based on the mod_jk module!
2. Load balancing with mod_jk
Notes:
To keep users from reaching the backend Tomcat instances directly and skewing the load balancing, it is advisable to disable the HTTP/1.1 connector on each Tomcat 7 instance.
Add a jvmRoute parameter to the engine of every Tomcat 7 instance to give each engine a globally unique identifier, as shown below. Note that no two instances may use the same jvmRoute value.
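A sketch of the Engine element with jvmRoute set; the names match the route and worker names used throughout this post:

<!-- On tomcat1 (192.168.18.201); tomcat2 uses jvmRoute="TomcatB" instead -->
<Engine name="Catalina" defaultHost="www.test.com" jvmRoute="TomcatA">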
As before, start by modifying the httpd.conf configuration file:
[root@apache ~]# vim /etc/httpd/httpd.conf
# Enable httpd-jk.conf
Next, modify the httpd-jk.conf configuration file:
[root@apache ~]# vim /etc/httpd/extra/httpd-jk.conf
# Load the mod_jk module
LoadModule jk_module modules/mod_jk.so
# Location of the workers file
JkWorkersFile /etc/httpd/extra/workers.properties
# Log file
JkLogFile logs/mod_jk.log
# Log level
JkLogLevel debug
# Name of the load balancer
JkMount /* lbcluster
# Status information
JkMount /jkstatus/ stat1
Next, edit /etc/httpd/extra/workers.properties and add the following:
[root@apache ~]# vim /etc/httpd/extra/workers.properties
# Worker list
worker.list = lbcluster,stat1
# TomcatA speaks the AJP protocol
worker.TomcatA.type = ajp13
# TomcatA instance IP
worker.TomcatA.host = 192.168.18.201
# TomcatA instance port
worker.TomcatA.port = 8009
# Load-balancing weight of 1
worker.TomcatA.lbfactor = 1
worker.TomcatB.type = ajp13
worker.TomcatB.host = 192.168.18.202
worker.TomcatB.port = 8009
worker.TomcatB.lbfactor = 1
# The load-balancing worker (built-in lb type)
worker.lbcluster.type = lb
# Session stickiness disabled
worker.lbcluster.sticky_session = 0
# The instances in the cluster
worker.lbcluster.balance_workers = TomcatA, TomcatB
# The status worker
worker.stat1.type = status
Note: about the related mod_jk directives:
A workers.properties file generally consists of two kinds of directives: the list of worker names that mod_jk may connect to, and the property settings for each worker. They follow this syntax:
worker.list = <a comma separated list of worker names>
worker.<worker name>.<property> = <property value>
The worker.list directive may be given more than once, and each worker name is the value of the jvmRoute parameter of a Tomcat engine component, for example:
worker.TomcatA.host=172.16.100.1
Depending on how it operates, a worker has one of several types, set with the required property worker.<worker name>.type. The common types are:
◇ ajp13: this worker represents a running Tomcat instance.
◇ lb: short for load balancing, a worker dedicated to load-balancing scenarios; it does not actually handle user requests but schedules them across workers of type ajp13.
◇ status: a special worker that displays the working state of the real workers in the distributed setup; it handles no requests and is not tied to any actual worker instance. See the configuration above for a concrete example.
Other common worker properties:
◇ host: the host where the Tomcat 7 worker instance runs;
◇ port: the port of the AJP 1.3 connector on the Tomcat 7 instance;
◇ connection_pool_minsize: the minimum number of connections kept in the connection pool; defaults to pool_size/2;
◇ connection_pool_timeout: the timeout for connections in the pool;
◇ mount: the context paths served by this worker, separated by spaces if there is more than one; the JkMount directive can be used instead of this property;
◇ retries: the number of retries when an error occurs;
◇ socket_timeout: how long mod_jk waits for a worker to respond; defaults to 0, i.e., wait indefinitely;
◇ socket_keepalive: whether to enable keep-alive; 1 enables it, 0 disables it;
◇ lbfactor: the worker's weight, which can be set per worker in load-balancing scenarios.
In load-balancing mode there are also some dedicated properties:
◇ balance_workers: the list of worker names participating in load balancing. Note that the workers named here must never be defined in any worker.list property, while worker.list itself must include the load-balancing worker. See the definitions above for a concrete example.
◇ method: R, T, or B; the default R schedules by number of requests; T schedules by the actual volume of traffic already sent to each worker; B schedules by actual load.
◇ sticky_session: once a request is scheduled to a worker, all subsequent requests from the same source are routed straight to that worker, binding the user's session to it. The default value is 1, i.e., enabled. If the backend workers replicate sessions among themselves, it can be set to 0, as in the sketch after this list.
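As a sketch of how stickiness ties into jvmRoute: with sticky_session enabled, mod_jk routes on the jvmRoute suffix that Tomcat appends to each session ID:

# Hypothetical: a session ID such as JSESSIONID=A1B2C3....TomcatA
# is pinned to the worker named TomcatA when stickiness is on
worker.lbcluster.sticky_session = 1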
Now let's check the configuration file and restart httpd:
[root@apache ~]# vim /etc/httpd/extra/workers.properties
[root@apache ~]# /usr/local/apache/bin/httpd -t
Syntax OK
[root@apache ~]# service httpd restart
Stopping httpd:                                            [  OK  ]
Starting httpd:                                            [  OK  ]
Next, let's test access again.
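A quick sketch of such a test from the shell, again assuming www.test.com resolves to the Apache host; with sticky_session = 0 the marker should alternate between TomcatA and TomcatB, and /jkstatus/ should show the stat1 worker:

# Hypothetical checks against the mod_jk balancer
curl -s http://www.test.com/index.jsp | grep -o 'Tomcat[AB]' | head -1
curl -I http://www.test.com/jkstatus/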
3. Viewing status information
There are two kinds of status pages:
Status information based on the mod_proxy module
Status information based on the mod_jk module
(1). Status information based on mod_proxy
First, modify the configuration file:
[root@apache ~]# vim /etc/httpd/extra/httpd-proxy.conf
ProxyRequests Off
<proxy balancer://lbcluster>
  BalancerMember ajp://192.168.18.201:8009 loadfactor=1 route=TomcatA
  BalancerMember ajp://192.168.18.202:8009 loadfactor=1 route=TomcatB
  ProxySet lbmethod=byrequests
</proxy>
<VirtualHost *:80>
  ServerAdmin admin@test.com
  ServerName www.test.com
  ProxyPass / balancer://lbcluster/ stickysession=JSESSIONID|jsessionid nofailover=On
  ProxyPassReverse / balancer://lbcluster/
  # Add the status page
  <Location /balancer-manager>
    SetHandler balancer-manager
    ProxyPass !
    Require all granted
  </Location>
  <Proxy *>
    Require all granted
  </Proxy>
  <Location />
    Require all granted
  </Location>
</VirtualHost>
Check the configuration and restart:
[root@apache ~]# /usr/local/apache/bin/httpd -t
Syntax OK
[root@apache ~]# service httpd restart
Stopping httpd:                                            [  OK  ]
Starting httpd:                                            [  OK  ]
Now let's take a look at the status page.
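The status page can also be fetched from the shell; a sketch, assuming www.test.com resolves to the Apache host:

# Hypothetical: fetch the balancer-manager page configured above
curl -s http://www.test.com/balancer-manager | grep -i lbcluster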
The mod_proxy status page is now configured; next, let's demonstrate the mod_jk status page.
(2). Status information based on mod_jk
First, modify the httpd.conf configuration file:
[root@apache ~]# vim /etc/httpd/httpd.conf
# Comment out httpd-proxy.conf and enable httpd-jk.conf
Now restart httpd and test again:
[root@apache ~]# service httpd restart
Stopping httpd:                                            [  OK  ]
Starting httpd:                                            [  OK  ]
That wraps up status monitoring. Next, let's configure the session-sharing cluster.
IV. DeltaManager Implementation
1. Session management
Types:
Standard session manager
Persistent session manager
(1). Standard session manager (StandardManager):
<Manager className="org.apache.catalina.session.StandardManager"
         maxInactiveInterval="7200"/>
Sessions are saved by default to the SESSIONS.ser file under $CATALINA_HOME/work/Catalina/<hostname>/<webapp-name>/.
maxActiveSessions: the maximum number of active sessions allowed; defaults to -1, meaning unlimited;
maxInactiveInterval: the idle-session timeout; defaults to 60s;
pathname: the directory where session files are saved.
(2). Persistent session manager (PersistentManager):
Saves session data to persistent storage and can reload it when the server restarts after an unexpected shutdown. The persistent session manager supports saving sessions to a file store (FileStore) or a JDBC store (JDBCStore).
Example of saving to files:
<Manager className="org.apache.catalina.session.PersistentManager"
         saveOnRestart="true">
  <Store className="org.apache.catalina.session.FileStore"
         directory="/data/tomcat-sessions"/>
</Manager>
Each user's session is saved as a file named <session id>.session in the directory given by the directory attribute, and a background thread checks for expired sessions periodically (every checkInterval seconds, 60 by default).
Example of saving to a JDBCStore:
<Manager className="org.apache.catalina.session.PersistentManager"
         saveOnRestart="true">
  <Store className="org.apache.catalina.session.JDBCStore"
         driverName="com.mysql.jdbc.Driver"
         connectionURL="jdbc:mysql://localhost:3306/mydb?user=jb;password=pw"/>
</Manager>
2. The Manager component
The Manager object implements HTTP session management. Tomcat ships five Manager implementations:
(1). StandardManager
The default session manager in Tomcat 6, used in non-clustered environments to manage the sessions of a single running Tomcat instance. When Tomcat shuts down, session data is written to a file named SESSIONS.ser on disk and read back the next time Tomcat starts.
(2). PersistentManager
Swaps session objects out to storage when a session has been idle for a long time, which is useful in memory-constrained environments.
(3). DeltaManager
The session manager for Tomcat clusters; it replicates sessions by propagating session changes to the other nodes in the cluster. Every change is synchronized to every node, which makes it the most widely used implementation in clustered environments.
(4). BackupManager
Also a session manager for Tomcat clusters; unlike DeltaManager, a node's session changes are synchronized to only one other node rather than all of them.
(5). SimpleTcpReplicationManager
The Tomcat 4-era implementation; too old to use today.
3. Implementing DeltaManager step by step
(1). Modify the server.xml configuration file
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
         channelSendOptions="8">
  <Manager className="org.apache.catalina.ha.session.DeltaManager"
           expireSessionsOnShutdown="false"
           notifyListenersOnReplication="true"/>
  <Channel className="org.apache.catalina.tribes.group.GroupChannel">
    <Membership className="org.apache.catalina.tribes.membership.McastService"
                address="228.0.0.4"
                port="45564"
                frequency="500"
                dropTime="3000"/>
    <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
              address="192.168.18.201"
              port="4000"
              autoBind="100"
              selectorTimeout="5000"
              maxThreads="6"/>
    <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
      <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
    </Sender>
    <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
    <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatch15Interceptor"/>
  </Channel>
  <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
         filter="/"/>
  <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
  <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
            tempDir="/tmp/war-temp/"
            deployDir="/tmp/war-deploy/"
            watchDir="/tmp/war-listen/"
            watchEnabled="false"/>
  <ClusterListener className="org.apache.catalina.ha.session.JvmRouteSessionIDBinderListener"/>
  <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
</Cluster>
If the above is defined inside the Engine container, clustering is enabled for all hosts; if defined inside a particular Host, it is enabled only for that host. (Note: both tomcat1 and tomcat2 must be modified!)
tomcat1:
[root@tomcat1 src]# vim /usr/local/tomcat/conf/server.xml
# Add the following section inside the Engine component
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
         channelSendOptions="8">
  <Manager className="org.apache.catalina.ha.session.DeltaManager"
           expireSessionsOnShutdown="false"
           notifyListenersOnReplication="true"/>
  <Channel className="org.apache.catalina.tribes.group.GroupChannel">
    <Membership className="org.apache.catalina.tribes.membership.McastService"
                address="228.0.0.4"
                port="45564"
                frequency="500"
                dropTime="3000"/>
    <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
              address="192.168.18.201"
              port="4000"
              autoBind="100"
              selectorTimeout="5000"
              maxThreads="6"/>
    <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
      <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
    </Sender>
    <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
    <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatch15Interceptor"/>
  </Channel>
  <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
         filter="/"/>
  <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
  <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
            tempDir="/tmp/war-temp/"
            deployDir="/tmp/war-deploy/"
            watchDir="/tmp/war-listen/"
            watchEnabled="false"/>
  <ClusterListener className="org.apache.catalina.ha.session.JvmRouteSessionIDBinderListener"/>
  <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
</Cluster>
[root@tomcat1 src]# service tomcat stop          # stop tomcat
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat1 src]# service tomcat configtest    # check the configuration
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Sep 25, 2013 3:51:31 PM org.apache.catalina.core.AprLifecycleListener init
INFO: The APR based Apache Tomcat Native library which allows optimal performance in production environments was not found on the java.library.path: /usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
Sep 25, 2013 3:51:32 PM org.apache.coyote.AbstractProtocol init
INFO: Initializing ProtocolHandler ["http-bio-80"]
Sep 25, 2013 3:51:32 PM org.apache.coyote.AbstractProtocol init
INFO: Initializing ProtocolHandler ["ajp-bio-8009"]
Sep 25, 2013 3:51:32 PM org.apache.catalina.startup.Catalina load
INFO: Initialization processed in 1562 ms
tomcat2:
[root@tomcat2 src]# vim /usr/local/tomcat/conf/server.xml
# Likewise, add the following section inside the Engine component
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"
         channelSendOptions="8">
  <Manager className="org.apache.catalina.ha.session.DeltaManager"
           expireSessionsOnShutdown="false"
           notifyListenersOnReplication="true"/>
  <Channel className="org.apache.catalina.tribes.group.GroupChannel">
    <Membership className="org.apache.catalina.tribes.membership.McastService"
                address="228.0.0.4"
                port="45564"
                frequency="500"
                dropTime="3000"/>
    <Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver"
              address="192.168.18.202"
              port="4000"
              autoBind="100"
              selectorTimeout="5000"
              maxThreads="6"/>
    <Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter">
      <Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
    </Sender>
    <Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
    <Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatch15Interceptor"/>
  </Channel>
  <Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
         filter="/"/>
  <Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
  <Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
            tempDir="/tmp/war-temp/"
            deployDir="/tmp/war-deploy/"
            watchDir="/tmp/war-listen/"
            watchEnabled="false"/>
  <ClusterListener className="org.apache.catalina.ha.session.JvmRouteSessionIDBinderListener"/>
  <ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
</Cluster>
[root@tomcat2 src]# service tomcat stop          # stop tomcat
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat2 src]# service tomcat configtest    # check the configuration
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
Sep 25, 2013 3:51:31 PM org.apache.catalina.core.AprLifecycleListener init
INFO: The APR based Apache Tomcat Native library which allows optimal performance in production environments was not found on the java.library.path: /usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
Sep 25, 2013 3:51:32 PM org.apache.coyote.AbstractProtocol init
INFO: Initializing ProtocolHandler ["http-bio-80"]
Sep 25, 2013 3:51:32 PM org.apache.coyote.AbstractProtocol init
INFO: Initializing ProtocolHandler ["ajp-bio-8009"]
Sep 25, 2013 3:51:32 PM org.apache.catalina.startup.Catalina load
INFO: Initialization processed in 1562 ms
In addition, every web application that participates in the cluster must have <distributable/> added to its web.xml for clustering to take effect. If a web application has no web.xml of its own, you can copy the default web.xml into its WEB-INF directory, as we do below.
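A minimal sketch of such a web.xml; only the <distributable/> element is the addition, everything else is the standard Servlet 3.0 skeleton that Tomcat 7's default web.xml already provides:

<?xml version="1.0" encoding="UTF-8"?>
<web-app xmlns="http://java.sun.com/xml/ns/javaee"
         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
         xsi:schemaLocation="http://java.sun.com/xml/ns/javaee
                             http://java.sun.com/xml/ns/javaee/web-app_3_0.xsd"
         version="3.0">
  <distributable/>
  <!-- the rest of the default web.xml content stays as-is -->
</web-app>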
(2). Modify web.xml
tomcat1:
[root@tomcat1 ~]# cd /web/webapp/
[root@tomcat1 webapp]# ls
index.jsp
[root@tomcat1 webapp]# mkdir WEB-INF
[root@tomcat1 webapp]# ls
index.jsp  WEB-INF
[root@tomcat1 webapp]# cp /usr/local/tomcat/conf/web.xml WEB-INF/
[root@tomcat1 ~]# vim /usr/local/tomcat/conf/web.xml
# Add one line: <distributable/>
tomcat2:
[root@tomcat2 ~]# cd /web/webapp/
[root@tomcat2 webapp]# ls
index.jsp
[root@tomcat2 webapp]# mkdir WEB-INF
[root@tomcat2 webapp]# ls
index.jsp  WEB-INF
[root@tomcat2 webapp]# cp /usr/local/tomcat/conf/web.xml WEB-INF/
[root@tomcat2 ~]# vim /usr/local/tomcat/conf/web.xml
# Add one line: <distributable/>
(3). Start the Tomcat servers
tomcat1:
[root@tomcat1 conf]# catalina.sh start
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr/java/jdk1.7.0_40
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat1 conf]# netstat -ntulp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address               Foreign Address   State    PID/Program name
tcp        0      0 0.0.0.0:22                  0.0.0.0:*         LISTEN   1028/sshd
tcp        0      0 127.0.0.1:25                0.0.0.0:*         LISTEN   1104/master
tcp        0      0 127.0.0.1:6010              0.0.0.0:*         LISTEN   1187/sshd
tcp        0      0 127.0.0.1:6011              0.0.0.0:*         LISTEN   4718/sshd
tcp        0      0 127.0.0.1:6012              0.0.0.0:*         LISTEN   5385/sshd
tcp        0      0 :::80                       :::*              LISTEN   5495/java
tcp        0      0 :::22                       :::*              LISTEN   1028/sshd
tcp        0      0 ::1:25                      :::*              LISTEN   1104/master
tcp        0      0 ::1:6010                    :::*              LISTEN   1187/sshd
tcp        0      0 ::1:6011                    :::*              LISTEN   4718/sshd
tcp        0      0 ::1:6012                    :::*              LISTEN   5385/sshd
tcp        0      0 ::ffff:192.168.18.201:4000  :::*              LISTEN   5495/java
tcp        0      0 :::8009                     :::*              LISTEN   5495/java
udp        0      0 :::45564                    :::*                       5495/java
tomcat2:
[root@tomcat2 conf]# catalina.sh start
Using CATALINA_BASE:   /usr/local/tomcat
Using CATALINA_HOME:   /usr/local/tomcat
Using CATALINA_TMPDIR: /usr/local/tomcat/temp
Using JRE_HOME:        /usr/java/jdk1.7.0_40
Using CLASSPATH:       /usr/local/tomcat/bin/bootstrap.jar:/usr/local/tomcat/bin/tomcat-juli.jar
[root@tomcat2 conf]# netstat -ntulp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address               Foreign Address   State    PID/Program name
tcp        0      0 0.0.0.0:22                  0.0.0.0:*         LISTEN   1038/sshd
tcp        0      0 127.0.0.1:25                0.0.0.0:*         LISTEN   1114/master
tcp        0      0 127.0.0.1:6010              0.0.0.0:*         LISTEN   1194/sshd
tcp        0      0 127.0.0.1:6011              0.0.0.0:*         LISTEN   2219/sshd
tcp        0      0 :::80                       :::*              LISTEN   2439/java
tcp        0      0 :::22                       :::*              LISTEN   1038/sshd
tcp        0      0 ::1:25                      :::*              LISTEN   1114/master
tcp        0      0 ::1:6010                    :::*              LISTEN   1194/sshd
tcp        0      0 ::1:6011                    :::*              LISTEN   2219/sshd
tcp        0      0 ::ffff:192.168.18.202:4000  :::*              LISTEN   2485/java
tcp        0      0 ::ffff:127.0.0.1:4000       :::*              LISTEN   2439/java
tcp        0      0 :::8009                     :::*              LISTEN   2439/java
udp        0      0 :::45564                    :::*                       2485/java
udp        0      0 :::45564                    :::*                       2439/java
(4). Check the Tomcat cluster logs
[root@tomcat1 ~]# tail -f /usr/local/tomcat/logs/catalina.2013-09-25.log
Sep 25, 2013 6:37:43 PM org.apache.catalina.ha.session.JvmRouteBinderValve startInternal
INFO: JvmRouteBinderValve started
Sep 25, 2013 6:37:43 PM org.apache.coyote.AbstractProtocol start
INFO: Starting ProtocolHandler ["http-bio-80"]
Sep 25, 2013 6:37:43 PM org.apache.coyote.AbstractProtocol start
INFO: Starting ProtocolHandler ["ajp-bio-8009"]
Sep 25, 2013 6:37:43 PM org.apache.catalina.startup.Catalina start
INFO: Server startup in 24043 ms
Sep 25, 2013 6:37:49 PM org.apache.catalina.ha.tcp.SimpleTcpCluster memberAdded
INFO: Replication member added:org.apache.catalina.tribes.membership.MemberImpl[tcp://{192, 168, 18, 202}:4000,{192, 168, 18, 202},4000, alive=1020, securePort=-1, UDP Port=-1, id={29 -117 97 -98 101 -57 71 -105 -113 -2 99 -104 -91 -108 -65 -101 }, payload={}, command={}, domain={}, ]
Sep 25, 2013 6:39:12 PM org.apache.catalina.tribes.group.interceptors.TcpFailureDetector memberDisappeared
INFO: Verification complete. Member disappeared[org.apache.catalina.tribes.membership.MemberImpl[tcp://{192, 168, 18, 202}:4000,{192, 168, 18, 202},4000, alive=84377, securePort=-1, UDP Port=-1, id={29 -117 97 -98 101 -57 71 -105 -113 -2 99 -104 -91 -108 -65 -101 }, payload={}, command={66 65 66 89 45 65 76 69 88 ...(9)}, domain={}, ]]
Sep 25, 2013 6:39:12 PM org.apache.catalina.ha.tcp.SimpleTcpCluster memberDisappeared
INFO: Received member disappeared:org.apache.catalina.tribes.membership.MemberImpl[tcp://{192, 168, 18, 202}:4000,{192, 168, 18, 202},4000, alive=84377, securePort=-1, UDP Port=-1, id={29 -117 97 -98 101 -57 71 -105 -113 -2 99 -104 -91 -108 -65 -101 }, payload={}, command={66 65 66 89 45 65 76 69 88 ...(9)}, domain={}, ]
(5). Now for the test
As the screenshots show, no matter how many times you refresh, the Session ID does not change, which means our DeltaManager cluster is configured and sessions are shared across the hosts. Finally, let's implement Nginx load balancing for Tomcat.
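The same check can be scripted; a sketch using curl's cookie jar, assuming www.test.com resolves to the proxy. Run it twice; the session ID printed on the test page should be identical both times, even as the balancer moves between backends:

# Hypothetical session-sharing check: reuse one cookie jar across requests
curl -s -c /tmp/jar -b /tmp/jar http://www.test.com/index.jsp
curl -s -c /tmp/jar -b /tmp/jar http://www.test.com/index.jsp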
V. Tomcat Load Balancing with Nginx
1. Lab topology
2. Install the yum repository
[root@nginx ~]# rpm -ivh http://download.fedoraproject.org/pub/epel/6/x86_64/epel-release-6-8.noarch.rpm
3. Stop httpd and change the hostname
[root@nginx ~]# service httpd stop
Stopping httpd:                                            [  OK  ]
[root@nginx ~]# uname -n
nginx.test.com
4. Install nginx with yum
[root@nginx ~]# yum install -y nginx
5. Configure nginx load balancing
[root@nginx conf.d]# vim /etc/nginx/conf.d/default.conf
#
# The default server
#
upstream tomcat {
    server 192.168.18.201;
    server 192.168.18.202;
}
server {
    listen       80 default_server;
    server_name  _;

    #charset koi8-r;
    #access_log  logs/host.access.log  main;

    location / {
        #root   /usr/share/nginx/html;
        #index  index.html index.htm;
        proxy_pass http://tomcat;
    }

    error_page  404              /404.html;
    location = /404.html {
        root   /usr/share/nginx/html;
    }

    # redirect server error pages to the static page /50x.html
    #
    error_page   500 502 503 504  /50x.html;
    location = /50x.html {
        root   /usr/share/nginx/html;
    }

    # proxy the PHP scripts to Apache listening on 127.0.0.1:80
    #
    #location ~ \.php$ {
    #    proxy_pass   http://127.0.0.1;
    #}

    # pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
    #
    #location ~ \.php$ {
    #    root           html;
    #    fastcgi_pass   127.0.0.1:9000;
    #    fastcgi_index  index.php;
    #    fastcgi_param  SCRIPT_FILENAME  /scripts$fastcgi_script_name;
    #    include        fastcgi_params;
    #}

    # deny access to .htaccess files, if Apache's document root
    # concurs with nginx's one
    #
    #location ~ /\.ht {
    #    deny  all;
    #}
}
6. Check the configuration and start the service
[root@nginx conf.d]# nginx -t
nginx: the configuration file /etc/nginx/nginx.conf syntax is ok
nginx: configuration file /etc/nginx/nginx.conf test is successful
[root@nginx conf.d]# service nginx start
Starting nginx:                                            [  OK  ]
[root@nginx conf.d]# netstat -ntulp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address      Foreign Address   State    PID/Program name
tcp        0      0 0.0.0.0:111        0.0.0.0:*         LISTEN   1062/rpcbind
tcp        0      0 0.0.0.0:80         0.0.0.0:*         LISTEN   1517/nginx
tcp        0      0 0.0.0.0:22         0.0.0.0:*         LISTEN   1219/sshd
tcp        0      0 127.0.0.1:25       0.0.0.0:*         LISTEN   1295/master
tcp        0      0 0.0.0.0:35929      0.0.0.0:*         LISTEN   1080/rpc.statd
tcp        0      0 :::111             :::*              LISTEN   1062/rpcbind
tcp        0      0 :::34613           :::*              LISTEN   1080/rpc.statd
tcp        0      0 :::22              :::*              LISTEN   1219/sshd
tcp        0      0 ::1:25             :::*              LISTEN   1295/master
udp        0      0 0.0.0.0:111        0.0.0.0:*                  1062/rpcbind
udp        0      0 0.0.0.0:60672      0.0.0.0:*                  1080/rpc.statd
udp        0      0 0.0.0.0:813        0.0.0.0:*                  1062/rpcbind
udp        0      0 0.0.0.0:832        0.0.0.0:*                  1080/rpc.statd
udp        0      0 :::111             :::*                       1062/rpcbind
udp        0      0 :::813             :::*                       1062/rpcbind
udp        0      0 :::47798           :::*                       1080/rpc.statd
7. Run the test
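A sketch of the same checks against the Nginx front end; the hostname nginx.test.com is taken from the uname output above and is assumed to resolve to the Nginx node:

# Hypothetical: requests should round-robin across both Tomcats while the
# session ID stays the same thanks to DeltaManager replication
curl -s -c /tmp/jar -b /tmp/jar http://nginx.test.com/index.jsp | grep -o 'Tomcat[AB]' | head -1
curl -s -c /tmp/jar -b /tmp/jar http://nginx.test.com/index.jsp | grep -o 'Tomcat[AB]' | head -1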
With that, Nginx load balancing for Tomcat with session sharing is complete. I hope you've gotten something out of this. ^_^