Building an LNMT Website with Tomcat and Implementing Session Persistence
Introduction
LNMT = Linux + Nginx + MySQL + Tomcat.
Tomcat is a free, open-source web application server in the lightweight category.
It is widely used for small and medium-sized systems and for sites without heavy concurrent traffic, and it is the first choice for developing and debugging JSP applications.
Architecture requirements
Basic architecture for Tomcat to handle dynamic JSP requests
![](https://img.laitimes.com/img/_0nNw4CM6IyYiwiM6ICdiwiInBnauITM5UFTQ1kcU9lWLFUQVVHN4pmbNFncOFDTvl2S39CXykzLcZjMvwVMw00LcJDMzZWe39CXt92Yu8GdjFTNuMzcvw1LcpDc0RHaiojIsJye.jpg)
Note: the back-end Tomcat parses the dynamic JSP requests; to improve response performance, Nginx runs on the same host as a reverse proxy and simply forwards all requests to Tomcat.
Complete LNMT architecture design
Note: this post mainly covers the path from a single HAProxy instance to multiple back-end Tomcat servers.
Installation and configuration
Tomcat installation and configuration
Install the JDK
# rpm -ivh jdk-7u9-linux-x64.rpm
# vi /etc/profile.d/java.sh
export JAVA_HOME=/usr/java/latest
export PATH=$JAVA_HOME/bin:$PATH
# . /etc/profile.d/java.sh
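A quick check (not part of the original walkthrough) confirms that the JDK and the exported variables are in effect:
# java -version # should report the JDK 7 build installed from the RPM
# echo $JAVA_HOME # should print /usr/java/latest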
Install Tomcat
# tar xf apache-tomcat-7.0.42.tar.gz -C /usr/local/
# cd /usr/local/
# ln -sv apache-tomcat-7.0.42/ tomcat
# vi /etc/profile.d/tomcat.sh
export CATALINA_HOME=/usr/local/tomcat
export PATH=$CATALINA_HOME/bin:$PATH
# . /etc/profile.d/tomcat.sh
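Likewise, before writing the service script, the Tomcat environment can be verified with the version target that catalina.sh provides:
# catalina.sh version # prints the Tomcat and JVM versions if CATALINA_HOME and JAVA_HOME are set correctly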
# Write the service script
# vi /etc/init.d/tomcat
#!/bin/sh
# Tomcat init script for Linux.
#
# chkconfig: 2345 96 14
# description: The Apache Tomcat servlet/JSP container.
# JAVA_OPTS='-Xms64m -Xmx128m'
JAVA_HOME=/usr/java/latest
CATALINA_HOME=/usr/local/tomcat
export JAVA_HOME CATALINA_HOME
case $1 in
start)
exec $CATALINA_HOME/bin/catalina.sh start ;;
stop)
exec $CATALINA_HOME/bin/catalina.sh stop;;
restart)
$CATALINA_HOME/bin/catalina.sh stop
sleep 2
exec $CATALINA_HOME/bin/catalina.sh start ;;
*)
echo "Usage: `basename $0` {start|stop|restart}"
exit 1
;;
esac
# chmod +x /etc/init.d/tomcat
Configure Tomcat
# cd /usr/local/tomcat/conf
# vi server.xml
<?xml version='1.0' encoding='utf-8'?>
<Server port="8005" shutdown="SHUTDOWN">
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<Listener className="org.apache.catalina.core.JasperListener" />
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
<GlobalNamingResources>
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml" />
</GlobalNamingResources>
<Service name="Catalina">
<Connector port="9000" protocol="HTTP/1.1" # 配置HTTP連接配接器監聽9000端口
connectionTimeout="20000"
redirectPort="8443" />
<Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />
<Engine name="Catalina" defaultHost="localhost">
<Realm className="org.apache.catalina.realm.LockOutRealm">
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
</Realm>
<Host name="xxrenzhe.lnmmp.com" appBase="webapps" # 新增Host,配置相應的Context
unpackWARs="true" autoDeploy="true">
<Context path="" docBase="lnmmpapp" /> # 配置的應用程式目錄是webapps/lnmmpapp
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="lnmmp_access_log." suffix=".txt"
pattern="%h %l %u %t "%r" %s %b" />
</Host>
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true">
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="localhost_access_log." suffix=".txt"
pattern="%h %l %u %t "%r" %s %b" />
</Host>
</Engine>
</Service>
</Server>
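After editing server.xml it is worth running a syntax check; Tomcat 7's catalina.sh offers a configtest target for this (an optional sanity step):
# catalina.sh configtest # a non-zero exit code indicates a server.xml error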
# Create the application directories
# cd /usr/local/tomcat/webapps/
# mkdir -pv lnmmpapp/WEB-INF/{classes,lib}
# cd lnmmpapp
# vi index.jsp # Write the home page
<%@ page language="java" %>
<html>
<head><title>Tomcat1</title></head><!-- use Tomcat2 on the Tomcat2 host -->
<body>
<h1><font color="red">Tomcat1.lnmmp.com</font></h1><!-- on the Tomcat2 host use Tomcat2.lnmmp.com and change the color to blue -->
<table align="center" border="1">
<tr>
<td>Session ID</td>
<% session.setAttribute("lnmmp.com","lnmmp.com"); %>
<td><%= session.getId() %></td>
</tr>
<tr>
<td>Created on</td>
<td><%= session.getCreationTime() %></td>
</tr>
</table>
</body>
</html>
Start the Tomcat service
chkconfig --add tomcat
service tomcat start
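At this point Tomcat can be tested directly on port 9000, before Nginx and HAProxy are put in front of it (a hypothetical quick test, run on the Tomcat host itself):
# curl -H 'Host: xxrenzhe.lnmmp.com' http://127.0.0.1:9000/index.jsp
# the response should contain Tomcat1.lnmmp.com and a freshly generated Session ID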
Nginx configuration
For Nginx installation, see the earlier post "如何測試Nginx的高性能" (How to test Nginx's high performance).
Configure Nginx
# vi /etc/nginx/nginx.conf
worker_processes 2;
error_log /var/log/nginx/nginx.error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
log_format main '$remote_addr - $remote_user [$time_local] "$request" '
'$status $body_bytes_sent "$http_referer" '
'"$http_user_agent" "$http_x_forwarded_for"';
sendfile on;
keepalive_timeout 65;
fastcgi_cache_path /www/cache levels=1:2 keys_zone=fcgicache:10m inactive=5m;
server { # handles image requests from the front end
listen 4040;
server_name xxrenzhe.lnmmp.com;
access_log /var/log/nginx/nginx-img.access.log main;
root /www/lnmmp.com;
valid_referers none blocked xxrenzhe.lnmmp.com *.lnmmp.com; # a basic anti-hotlinking policy
if ($invalid_referer) {
rewrite ^/ http://xxrenzhe.lnmmp.com/404.html;
}
}
server {
listen 80; # handles static requests from the front end
server_name xxrenzhe.lnmmp.com;
access_log /var/log/nginx/nginx-static.access.log main;
location / {
root /www/lnmmp.com;
index index.php index.html index.htm;
}
gzip on; # enable gzip compression for static files
gzip_comp_level 6;
gzip_buffers 16 8k;
gzip_http_version 1.1;
gzip_types text/plain text/css application/x-javascript text/xml application/xml;
gzip_disable "msie6";
}
server {
listen 8080;
server_name xxrenzhe.lnmmp.com;
access_log /var/log/nginx/nginx-tomcat.access.log main;
location / {
proxy_pass http://127.0.0.1:9000; # forward all dynamic requests to the back-end Tomcat
}
}
}
Start the service
service nginx start
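A few quick requests (file names such as test.jpg are placeholders; use whatever actually exists under /www/lnmmp.com) confirm that the three server blocks behave as intended:
# nginx -t # syntax check of /etc/nginx/nginx.conf
# curl -H 'Host: xxrenzhe.lnmmp.com' http://127.0.0.1/ # static content served from /www/lnmmp.com
# curl -H 'Host: xxrenzhe.lnmmp.com' http://127.0.0.1:8080/index.jsp # proxied to Tomcat on port 9000
# curl -H 'Host: xxrenzhe.lnmmp.com' http://127.0.0.1:4040/test.jpg # image server block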
HAProxy installation and configuration
# yum -y install haproxy
# vi /etc/haproxy/haproxy.cfg
global
log 127.0.0.1 local2
chroot /var/lib/haproxy
pidfile /var/run/haproxy.pid
maxconn 4000
user haproxy
group haproxy
daemon
defaults
mode http
log global
option httplog
option dontlognull
option http-server-close
option forwardfor except 127.0.0.0/8
option redispatch
retries 3
timeout http-request 10s
timeout queue 1m
timeout connect 10s
timeout client 1m
timeout server 1m
timeout http-keep-alive 10s
timeout check 10s
maxconn 30000
listen stats # HAProxy statistics page
mode http
bind 0.0.0.0:1080
stats enable
stats hide-version
stats uri /haproxyadmin?stats
stats realm Haproxy\ Statistics
stats auth admin:admin
stats admin if TRUE
frontend http-in
bind *:80
mode http
log global
option httpclose
option logasap
option dontlognull
capture request header Host len 20
capture request header Referer len 60
acl url_img path_beg -i /images
acl url_img path_end -i .jpg .jpeg .gif .png
acl url_dynamic path_end -i .jsp .do
use_backend img_servers if url_img # image requests go to the image servers
use_backend dynamic_servers if url_dynamic # dynamic JSP requests go to the Tomcat servers
default_backend static_servers # all other (static) requests go to the static servers
backend img_servers
balance roundrobin
server img-srv1 192.168.0.25:4040 check maxconn 6000
server img-srv2 192.168.0.35:4040 check maxconn 6000
backend static_servers
cookie node insert nocache
option httpchk HEAD /health_check.html
server static-srv1 192.168.0.25:80 check maxconn 6000 cookie static-srv1
server static-srv2 192.168.0.35:80 check maxconn 6000 cookie static-srv2
backend dynamic_servers
balance roundrobin
server tomcat1 192.168.0.25:8080 check maxconn 1000
server tomcat2 192.168.0.35:8080 check maxconn 1000
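Before starting the service, the configuration file can be checked for syntax errors:
# haproxy -c -f /etc/haproxy/haproxy.cfg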
service haproxy start
Local DNS resolution
xxrenzhe.lnmmp.com A 172.16.25.109 # point the record at the HAProxy IP address
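For quick testing from a Linux client without editing a DNS zone, an /etc/hosts entry pointing at the HAProxy address works just as well (a client-side shortcut, not part of the original setup):
# echo '172.16.25.109 xxrenzhe.lnmmp.com' >> /etc/hosts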
Access verification
Note: because the front-end HAProxy dispatches dynamic requests with the roundrobin algorithm, each refresh is assigned in turn to a different Tomcat node, and a different session is obtained every time.
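The same behaviour can be observed from the command line; the requests below should alternate between the two Tomcat nodes (a sketch, assuming the DNS or hosts entry above is in place):
# for i in 1 2 3 4; do curl -s http://xxrenzhe.lnmmp.com/index.jsp | grep -E '<h1>|<td>'; echo '---'; done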
Implementing session binding
Dispatch requests from the same user to the same back-end Tomcat, so that a simple refresh no longer loses the session.
Modify the Tomcat configuration
# vi /usr/local/tomcat/conf/server.xml # modify the following line, adding the jvmRoute attribute
<Engine name="Catalina" defaultHost="localhost" jvmRoute="tomcat1"> <!-- use tomcat2 on the Tomcat2 host -->
Modify the HAProxy configuration
# vi /etc/haproxy/haproxy.cfg # add cookie-based binding for the dynamic back-end nodes
backend dynamic_servers
cookie node insert nocache
balance roundrobin
server tomcat1 192.168.0.25:8080 check maxconn 1000 cookie tomcat1
server tomcat2 192.168.0.35:8080 check maxconn 1000 cookie tomcat2
Note: after the first successful request, further refreshes no longer change the assigned Tomcat node or the session information, which shows that session binding works.
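curl can confirm the binding as well, by saving and replaying the cookie that HAProxy inserts (cookies.txt is just a scratch file for the test):
# curl -s -c cookies.txt http://xxrenzhe.lnmmp.com/index.jsp | grep '<h1>' # first request, node chosen by roundrobin
# curl -s -b cookies.txt http://xxrenzhe.lnmmp.com/index.jsp | grep '<h1>' # repeat requests stay on the same node
# cat cookies.txt # shows the inserted node cookie (tomcat1 or tomcat2) and the JSESSIONID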
Implementing session persistence
Tomcat supports session clustering, which replicates all session information across the Tomcat servers; when one back-end Tomcat goes down and HAProxy re-dispatches the user's requests, the user's original session still exists on the remaining healthy Tomcat servers.
A session cluster is practical only while the number of Tomcat servers is small (generally fewer than 10); beyond that the replication overhead becomes too high.
Configuration
# vi /usr/local/tomcat/conf/server.xml # full configuration
<?xml version='1.0' encoding='utf-8'?>
<Server port="8005" shutdown="SHUTDOWN">
<Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
<Listener className="org.apache.catalina.core.JasperListener" />
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
<GlobalNamingResources>
<Resource name="UserDatabase" auth="Container"
type="org.apache.catalina.UserDatabase"
description="User database that can be updated and saved"
factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
pathname="conf/tomcat-users.xml" />
</GlobalNamingResources>
<Service name="Catalina">
<Connector port="9000" protocol="HTTP/1.1"
connectionTimeout="20000"
redirectPort="8443" />
<Connector port="8009" protocol="AJP/1.3" redirectPort="8443" />
<Engine name="Catalina" defaultHost="localhost" jvmRoute="tomcat1"># 在Tomcat2主機上替換為tomcat2
<Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster" # 添加叢集相關配置;
channelSendOptions="8">
<Manager className="org.apache.catalina.ha.session.DeltaManager" # 叢集會話管理器選擇DeltaManager;
expireSessionsOnShutdown="false"
notifyListenersOnReplication="true"/>
<Channel className="org.apache.catalina.tribes.group.GroupChannel"> # 為叢集中的幾點定義通信信道;
<Membership className="org.apache.catalina.tribes.membership.McastService" # 定義使用McastService确定叢集中的成員
address="228.25.25.4" # 叢集内session複制所用的多點傳播位址
port="45564"
frequency="500"
dropTime="3000"/>
<Receiver className="org.apache.catalina.tribes.transport.nio.NioReceiver" # 定義以NioReceiver方式接收其它節點的資料;
address="192.168.0.25"# 在Tomcat2主機上替換為192.168.0.35
port="4000"
autoBind="100"
selectorTimeout="5000"
maxThreads="6"/>
<Sender className="org.apache.catalina.tribes.transport.ReplicationTransmitter"> # 定義資料複制的發送器;
<Transport className="org.apache.catalina.tribes.transport.nio.PooledParallelSender"/>
</Sender>
<Interceptor className="org.apache.catalina.tribes.group.interceptors.TcpFailureDetector"/>
<Interceptor className="org.apache.catalina.tribes.group.interceptors.MessageDispatch15Interceptor"/>
</Channel>
<Valve className="org.apache.catalina.ha.tcp.ReplicationValve"
filter=""/>
<Valve className="org.apache.catalina.ha.session.JvmRouteBinderValve"/>
<Deployer className="org.apache.catalina.ha.deploy.FarmWarDeployer"
tempDir="/tmp/war-temp/"
deployDir="/tmp/war-deploy/"
watchDir="/tmp/war-listen/"
watchEnabled="false"/>
<ClusterListener className="org.apache.catalina.ha.session.JvmRouteSessionIDBinderListener"/>
<ClusterListener className="org.apache.catalina.ha.session.ClusterSessionListener"/>
</Cluster>
<Realm className="org.apache.catalina.realm.LockOutRealm">
<Realm className="org.apache.catalina.realm.UserDatabaseRealm"
resourceName="UserDatabase"/>
</Realm>
<Host name="xxrenzhe.lnmmp.com" appBase="webapps"
unpackWARs="true" autoDeploy="true">
<Context path="" docBase="lnmmpapp" />
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="lnmmp_access_log." suffix=".txt"
pattern="%h %l %u %t "%r" %s %b" />
</Host>
<Host name="localhost" appBase="webapps"
unpackWARs="true" autoDeploy="true">
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
prefix="localhost_access_log." suffix=".txt"
pattern="%h %l %u %t "%r" %s %b" />
</Host>
</Engine>
</Service>
</Server>
# cd /usr/local/tomcat/webapps/lnmmpapp/WEB-INF/
# cp /usr/local/tomcat/conf/web.xml .
# vi web.xml # add the following line; it does not need to be nested inside any other element (it is a direct child of <web-app>)
<distributable/>
Check the logs
# tailf /usr/local/tomcat/logs/catalina.out
May 08, 2014 11:08:13 PM org.apache.catalina.ha.tcp.SimpleTcpCluster memberAdded
INFO: Replication member added:org.apache.catalina.tribes.membership.MemberImpl[tcp://{192, 168, 0, 35}:4000,{192, 168, 0, 35},4000, alive=1029, securePort=-1, UDP Port=-1, id={106 35 -62 -54 -28 61 74 -98 -86 -11 -69 104 28 -114 32 -69 }, payload={}, command={}, domain={}, ]
# If the message above appears, the session cluster is working: tomcat1 has detected the tomcat2 node
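If the "Replication member added" message never appears, the usual cause is that multicast membership traffic or the replication port is blocked; assuming iptables is active on the Tomcat hosts, rules along these lines would be needed (adjust to the local firewall policy):
# iptables -I INPUT -d 228.25.25.4 -p udp --dport 45564 -j ACCEPT # McastService membership traffic
# iptables -I INPUT -p tcp --dport 4000 -j ACCEPT # NioReceiver replication channel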
First visit
Then stop the nginx service on the tomcat1 host (service nginx stop) and visit again
Note: although the tomcat1 failure caused the user's request to be dispatched to the tomcat2 node, the Session ID did not change; every node in the session cluster holds the full session information, so user access continues without interruption.
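The failover can also be reproduced with curl, reusing the cookie file from the sticky-session test (assuming ssh access to the tomcat1 host at 192.168.0.25 and that the first request landed on tomcat1):
# curl -s -b cookies.txt http://xxrenzhe.lnmmp.com/index.jsp | grep -E '<h1>|<td>' # note the Session ID
# ssh 192.168.0.25 'service nginx stop' # take the tomcat1 front end down
# curl -s -b cookies.txt http://xxrenzhe.lnmmp.com/index.jsp | grep -E '<h1>|<td>' # now answered by Tomcat2 with the same session data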