First Look: Web Cluster Architecture Design



[Figure: web cluster architecture diagram]

The diagram is pretty rough, so go easy on it! I sketched it casually out of boredom while interning at my first company after graduation.

Node Plan

[Figure: node plan]

Cluster Environment: Storage Tier (nfs|rsync)

Set the hostnames (all nodes):
hostnamectl set-hostname nfs
hostnamectl set-hostname rsync
Configure hosts resolution (all nodes):
cat /etc/hosts
192.168.10.130 nfs
192.168.10.131 rsync
192.168.10.132 mysql-master
192.168.10.133 mysql-slave
192.168.10.134 mysql-proxy
192.168.10.135 master
192.168.10.136 backup
192.168.10.137 tomcat-1
192.168.10.138 tomcat-2
192.168.10.141 redis-master
192.168.10.142 redis-backup
Create directories (nfs node):
mkdir /{data,mysql}
Install rsync (all nodes):
yum install -y rsync
Install NFS (nfs node):
yum install -y nfs-utils rpcbind
Operations on the rsync node:
mkdir -p /backup/{data,mysql}
cat /etc/rsyncd.conf 
uid = rsync
gid = rsync
port = 873
fake super = yes
use chroot = no
max connections = 200
timeout = 300
ignore errors
read only = false
list = false
auth users = rsync_backup
secrets file = /etc/rsync.password
pid file = /var/run/rsyncd.pid
lock file = /var/run/rsync.lock
log file = /var/log/rsyncd.log
[data]
path = /backup/data
hosts allow = 0.0.0.0/0
hosts deny = 0.0.0.0/32
[mysql]
path = /backup/mysql
hosts allow = 0.0.0.0/0
hosts deny = 0.0.0.0/32
useradd -s /sbin/nologin -M rsync
chown rsync.rsync /backup/*
systemctl restart rsyncd
echo "rsync_backup:000000" >/etc/rsync.password
chmod 600 /etc/rsync.password
Operations on the nfs node:
cat /etc/exports
/data 192.168.10.0/24(rw,sync,all_squash,anonuid=168,anongid=168)
/mysql 192.168.10.0/24(rw,sync,all_squash,anonuid=168,anongid=168)
systemctl start rpcbind
groupadd -g 168 nfs && useradd -s /sbin/nologin -u 168 -M -g nfs nfs
chown -R nfs.nfs /data/ /mysql/    # both exported directories must be writable by the squashed uid 168
systemctl start nfs-server
echo "000000" > /etc/rsync.password
Install inotify-tools (nfs node):
cd /usr/src/
wget http://github.com/downloads/rvoicilas/inotify-tools/inotify-tools-3.14.tar.gz
tar -zxvf inotify-tools-3.14.tar.gz
cd inotify-tools-3.14
./configure --prefix=/usr/local/inotify
make && make install
cd /opt/
cat /opt/backup_data.sh 
#!/bin/bash
################################
host01=192.168.10.131
src=/data
dst=data
user=rsync_backup
rsync_passfile=/etc/rsync.password
inotify_home=/usr/local/inotify/
#################################
if [ ! -e "$src" ] \
|| [ ! -e "${rsync_passfile}" ] \
|| [ ! -e "${inotify_home}/bin/inotifywait" ] \
|| [ ! -e "/usr/bin/rsync" ];
then
        echo "Check File and Folder"
        exit 9
fi

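# Watch $src recursively; on any change, mirror the whole tree to the rsync module on $host01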
${inotify_home}/bin/inotifywait -mrq --timefmt '%d/%m/%y %H:%M' --format '%T %w%f' -e close_write,delete,create,attrib,move $src \
| while read file
        do
cd $src && rsync -aruz -R --delete ./ --timeout=100 $user@$host01::$dst --password-file=${rsync_passfile} > /dev/null 2>&1
        done
exit 0
[root@nfs ~]# cat /opt/backup_mysql.sh 
#!/bin/bash
################################
host01=192.168.10.131
src=/mysql
dst=mysql
user=rsync_backup
rsync_passfile=/etc/rsync.password
inotify_home=/usr/local/inotify/
#################################
if [ ! -e "$src" ] \
|| [ ! -e "${rsync_passfile}" ] \
|| [ ! -e "${inotify_home}/bin/inotifywait" ] \
|| [ ! -e "/usr/bin/rsync" ];
then
        echo "Check File and Folder"
        exit 9
fi

${inotify_home}/bin/inotifywait -mrq --timefmt '%d/%m/%y %H:%M' --format '%T %w%f' -e close_write,delete,create,attrib,move $src \
| while read file
        do
cd $src && rsync -aruz -R --delete ./ --timeout=100 $user@$host01::$dst --password-file=${rsync_passfile} > /dev/null 2>&1
        done
exit 0
chmod +x /opt/backup_data.sh
chmod +x /opt/backup_mysql.sh
sh /opt/backup_data.sh &
sh /opt/backup_mysql.sh &
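With both watchers running in the background, a quick end-to-end check (hypothetical test file name) confirms changes propagate:
touch /data/sync_test.txt    # on the nfs node
ls /backup/data/             # on the rsync node; sync_test.txt should appear within a second or two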

Cluster Environment: Database Tier (mysql-master|mysql-slave)

Set the hostnames (all nodes):
hostnamectl set-hostname mysql-master
hostnamectl set-hostname mysql-slave
Install MySQL (all nodes):
Required packages:
mysql-community-client-5.7.28-1.el7.x86_64.rpm
mysql-community-common-5.7.28-1.el7.x86_64.rpm
mysql-community-libs-5.7.28-1.el7.x86_64.rpm
mysql-community-server-5.7.28-1.el7.x86_64.rpm
Install (all nodes):
rpm -ivh mysql-community-common-5.7.28-1.el7.x86_64.rpm 
rpm -ivh mysql-community-libs-5.7.28-1.el7.x86_64.rpm 
rpm -ivh mysql-community-client-5.7.28-1.el7.x86_64.rpm 
rpm -ivh mysql-community-server-5.7.28-1.el7.x86_64.rpm  // the server package may fail to install because of an outdated GPG key; just append --force --nodeps
service mysqld start
cat /var/log/mysqld.log | grep password
Log in to the database and change the root password.
Relax the password validation policy first:
set global validate_password_policy=0;
set global validate_password_length=1;
alter user 'root'@'localhost' identified by '000000';
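A quick check that the new password took effect:
mysql -uroot -p000000 -e "select version();"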
Install NFS (mysql-slave node):
yum install -y nfs-utils rpcbind
Relocate the data directory (mysql-slave node):
service mysqld stop
mkdir /web
mount -t nfs nfs:/mysql /web
cp -r /var/lib/mysql /web
chown -R mysql:mysql /web/*
vi /etc/my.cnf
[mysqld]
datadir=/web/mysql
socket=/web/mysql/mysql.sock
[client]
port=3306
socket=/web/mysql/mysql.sock
service mysqld start
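Confirm mysqld is now using the NFS-backed directory (simple sanity check):
mysql -uroot -p000000 -e "select @@datadir, @@socket;"    # both should point under /web/mysql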
Set up master-slave replication (mysql-master node):
cat /etc/my.cnf
[mysqld]
log-bin=mysql-bin
server-id=132
service mysqld restart
mysql> grant all privileges on *.* to 'root'@'%' identified by '000000';
mysql> grant replication slave on *.* to 'giao'@'192.168.10.%' identified by '000000';
mysql> show master status;
Set up master-slave replication (mysql-slave node):
cat /etc/my.cnf
[mysqld]
log-bin=mysql-bin
server-id=133
service mysqld restart
mysql> change master to master_host='mysql-master',master_user='giao',master_password='000000',master_log_file='mysql-bin.000001',master_log_pos=861; -- use the file and position reported by show master status on the master
mysql> start slave;
mysql> show slave status\G
Slave_IO_Running: Yes
Slave_SQL_Running: Yes
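A simple end-to-end replication test (hypothetical database name):
mysql -uroot -p000000 -e "create database repl_test;"           # on mysql-master
mysql -uroot -p000000 -e "show databases like 'repl_test';"     # on mysql-slave; repl_test should be listed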

Cluster Environment: Read/Write Splitting (mysql-proxy)

Set the hostname:
hostnamectl set-hostname mysql-proxy
Upload the package:
mysql-proxy-0.8.5-linux-el6-x86-64bit.tar.gz
tar zxf mysql-proxy-0.8.5-linux-el6-x86-64bit.tar.gz
mv mysql-proxy-0.8.5-linux-el6-x86-64bit /usr/local/mysql-proxy
cd /usr/local/mysql-proxy/
mkdir conf logs
Configure environment variables:
vim ~/.bash_profile ## edit line 10
PATH=$PATH:$HOME/bin:/usr/local/mysql-proxy/bin
source ~/.bash_profile
Edit the mysql-proxy settings:
vim /usr/local/mysql-proxy/conf/mysql-proxy.conf
[mysql-proxy]
user=root  # user that runs the mysql-proxy process
proxy-address=0.0.0.0:3306  # listen on port 3306 on all local addresses
proxy-backend-addresses=192.168.10.132:3306  # backend master server
proxy-read-only-backend-addresses=192.168.10.133:3306  # backend slave server
proxy-lua-script=/usr/local/mysql-proxy/share/doc/mysql-proxy/rw-splitting.lua  # path to the Lua script
log-file=/usr/local/mysql-proxy/logs/mysql-proxy  # proxy log path
daemon=true  # run as a daemon
keepalive=true  # try to restart on crash
chmod 660 /usr/local/mysql-proxy/conf/mysql-proxy.conf
vim share/doc/mysql-proxy/rw-splitting.lua 
if not proxy.global.config.rwsplit then
        proxy.global.config.rwsplit = {
                min_idle_connections = 1, -- by default splitting only starts past 4 connections; lowered to 1
                max_idle_connections = 2, -- default 8; lowered to 2
                is_debug = false
        }
end
Start mysql-proxy:
cd /usr/local/mysql-proxy
mysql-proxy --defaults-file=/usr/local/mysql-proxy/conf/mysql-proxy.conf
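With both backends registered, the split can be sanity-checked by connecting through the proxy and asking which backend answered (a sketch; server-id is 132 on the master and 133 on the slave per the my.cnf files above):
mysql -uroot -p000000 -h192.168.10.134 -P3306 -e "show variables like 'server_id';"
Note that queries usually only move to the slave once the number of open connections passes min_idle_connections, so a single connection will typically land on the master.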

Cluster Environment: Web Tier (tomcat-1|tomcat-2)

Set the hostnames (all nodes):
hostnamectl set-hostname tomcat-1
hostnamectl set-hostname tomcat-2
Configure the JDK (all nodes):
jdk-8u25-linux-x64.rpm
rpm -ivh jdk-8u25-linux-x64.rpm
vim /etc/profile
export JAVA_HOME=/usr/java/default
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
source /etc/profile
java -version
java version "1.8.0_25"
Java(TM) SE Runtime Environment (build 1.8.0_25-b17)
Java HotSpot(TM) 64-Bit Server VM (build 25.25-b02, mixed mode)
Deploy Tomcat (all nodes):
apache-tomcat-8.0.48.tar.gz
tar zxvf apache-tomcat-8.0.48.tar.gz -C /usr/local/
mv /usr/local/apache-tomcat-8.0.48 /usr/local/tomcat
cd /usr/local/tomcat/conf/
vim server.xml
<Engine name="Catalina" defaultHost="localhost" jvmRoute="tomcat1"> #主节点
<Engine name="Catalina" defaultHost="localhost" jvmRoute="tomcat2">  #备节点
Mount the NFS share (tomcat-1):
yum install -y nfs-utils rpcbind
mkdir /data
mount -t nfs nfs:/data /data/
vim /usr/local/tomcat/conf/server.xml 
Add the following inside the Host tag:
<Context path="" docBase="/data" debug="0" reloadable="true" />
Add test files (all nodes):
Primary: cat /data/index.jsp 
<%--
  Created by IntelliJ IDEA.
  User: giao
  Date: 2020/1/09
  Time: 3:08 PM
  To change this template use File | Settings | File Templates.
--%>
<%@ page contentType="text/html;charset=UTF-8" language="java" %>
<html>
  <head>
    <title>测试页面</title>
  </head>
  <body>
  SessionID:<%=session.getId()%> <BR>
  SessionIP:<%=request.getServerName()%> <BR>
  SessionPort:<%=request.getServerPort()%>
  <%out.println("This is Tomcat Server 1");%> 
  </body>
</html>
Backup: cat /data/index.jsp 
<%--
  Created by IntelliJ IDEA.
  User: giao
  Date: 2020/1/09
  Time: 3:08 PM
  To change this template use File | Settings | File Templates.
--%>
<%@ page contentType="text/html;charset=UTF-8" language="java" %>
<html>
  <head>
    <title>测试页面</title>
  </head>
  <body>
  SessionID:<%=session.getId()%> <BR>
  SessionIP:<%=request.getServerName()%> <BR>
  SessionPort:<%=request.getServerPort()%>
  <%out.println("This is Tomcat Server 2");%>
  </body>
</html>

Cluster Environment: Cache Tier (redis-master|redis-backup)

Set the hostnames (all nodes):
hostnamectl set-hostname redis-master
hostnamectl set-hostname redis-backup
Deploy Redis (all nodes):
redis-3.2.3.tar.gz
tar zxvf redis-3.2.3.tar.gz
cd redis-3.2.3
make PREFIX=/usr/local/redis install
cp redis.conf /usr/local/redis/
Edit the master node's config (redis-master):
vim redis.conf
#bind 127.0.0.1    # comment out the bind line so Redis stops listening on localhost only
protected-mode no
Edit the slave node's config (redis-backup):
vim redis.conf
#bind 127.0.0.1    # comment out the bind line so Redis stops listening on localhost only
protected-mode no
slaveof 192.168.10.141 6379

Configure Tomcat to Connect to Redis (tomcat-1|tomcat-2|redis-master|redis-backup)

jedis-2.5.2.jar
tomcat-redis-session-manager-2.0.0.jar
commons-pool2-2.2.jar
Move the three jars into /usr/local/tomcat/lib/ (all nodes):
mv commons-pool2-2.2.jar jedis-2.5.2.jar tomcat-redis-session-manager-2.0.0.jar /usr/local/tomcat/lib/
vim /usr/local/tomcat/conf/context.xml 
    <Valve className="com.orangefunction.tomcat.redissessions.RedisSessionHandlerValve" />
    <Manager className="com.orangefunction.tomcat.redissessions.RedisSessionManager"
    host="192.168.10.141"
    port="6379"
maxInactiveInterval="180" />
Start Redis (start the master node first, then the slave):
./bin/redis-server ./redis.conf &
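Once Redis is up, restart Tomcat on both web nodes so the RedisSessionManager configured above can actually connect:
/usr/local/tomcat/bin/shutdown.sh
/usr/local/tomcat/bin/startup.sh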
Test replication:
[root@redis-master redis]# ./bin/redis-cli
127.0.0.1:6379> get name
(nil)
127.0.0.1:6379> set name test
OK
127.0.0.1:6379> get name
"test"
[root@redis-backup redis]# ./bin/redis-cli
127.0.0.1:6379> get name
"test"

Configure redis-sentinel (redis-master|redis-backup)

Configure Sentinel (all nodes):
cat /usr/local/redis/sentinel.conf 
daemonize yes  # run as a daemon
protected-mode no
port 1777
logfile "/usr/local/redis/sentinel.log"
pidfile "/usr/local/redis/sentinel.pid"
sentinel monitor mymaster 192.168.10.141 6379 1 # monitor the master: "mymaster" is the service name, 192.168.10.141 the master IP, 6379 the port; the trailing 1 means one sentinel judging the master down is enough to trigger failover
sentinel down-after-milliseconds mymaster 2000 # milliseconds after which a sentinel considers the server subjectively down (SDOWN); once the configured quorum of sentinels agrees (ODOWN), failover begins
sentinel parallel-syncs mymaster 1 # how many slaves may resync with the new master in parallel after a failover
Start Sentinel (all nodes):
./bin/redis-sentinel ./sentinel.conf &
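Sentinel state can be queried on its own port, and killing the master demonstrates the failover (a sketch using the standard redis-cli sentinel subcommand):
./bin/redis-cli -p 1777 sentinel get-master-addr-by-name mymaster    # should report 192.168.10.141 6379
# stop redis on redis-master, wait a few seconds, then ask again:
./bin/redis-cli -p 1777 sentinel get-master-addr-by-name mymaster    # should now report 192.168.10.142 6379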

Cluster Environment: Load-Balancing Tier (master|backup)

Set the hostnames (all nodes):
hostnamectl set-hostname master
hostnamectl set-hostname backup
Deploy Nginx (all nodes):
nginx-1.6.3.tar.gz
tar zxvf nginx-1.6.3.tar.gz
cd nginx-1.6.3
./configure --prefix=/usr/local/nginx --user=www --group=www --with-http_stub_status_module --with-http_realip_module --with-http_ssl_module --with-http_gzip_static_module --with-pcre --with-http_flv_module && make && make install
cd /usr/local/nginx/conf/
Configure the Nginx load-balancing reverse proxy (all nodes):
vim nginx.conf
#user  nobody;
worker_processes  1;

#error_log  logs/error.log;
#error_log  logs/error.log  notice;
#error_log  logs/error.log  info;

#pid        logs/nginx.pid;


events {
    worker_connections  1024;
}


http {
upstream tomcat_pool
    {
          server 192.168.10.137:8080 weight=2 max_fails=1 fail_timeout=10s;
          server 192.168.10.138:8080 weight=2 max_fails=1 fail_timeout=10s;
    }
server {
        listen       80;
        server_name 192.168.10.140;
location / {
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header REMOTE-HOST $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_pass http://tomcat_pool;
        client_max_body_size 1000m;
        proxy_cookie_path /scientific  /;
        proxy_set_header Cookie $http_cookie;
}

        error_page 404 /404.html;
            location = /404.html {
        }
        error_page 500 502 503 504 /50x.html;
            location = /50x.html {
        }
    }
}
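The configure line above compiles in user www, which must exist at runtime; create it, then syntax-check and start Nginx (a sketch):
useradd -s /sbin/nologin -M www      # matches --user=www --group=www from configure
/usr/local/nginx/sbin/nginx -t       # syntax check
/usr/local/nginx/sbin/nginx
curl http://127.0.0.1/               # requests should alternate between the two Tomcat test pages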
Install keepalived (all nodes):
yum install -y keepalived
Master node configuration (master node):
cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_script chk_nginx {
    script "/opt/ha/check_nginx.sh"
    interval 1
    weight 2
}

vrrp_instance VI_1 {
    state MASTER
    interface ens32
    virtual_router_id 55
    priority 150
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }

    virtual_ipaddress {
        192.168.10.140/24 dev ens32 label ens32:1
    }

    track_script {
            chk_nginx
   }
}
Backup node configuration (backup node):
cat /etc/keepalived/keepalived.conf 
! Configuration File for keepalived

global_defs {
   notification_email {
     acassen@firewall.loc
   }
   notification_email_from Alexandre.Cassen@firewall.loc
   smtp_server 127.0.0.1
   smtp_connect_timeout 30
   router_id LVS_DEVEL
}

vrrp_script chk_nginx {
    script "/opt/ha/check_nginx.sh"
    interval 1
    weight 2
}

vrrp_instance VI_1 {
    state BACKUP
    interface ens32
    virtual_router_id 55
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 1111
    }

    virtual_ipaddress {
        192.168.10.140/24 dev ens32 label ens32:1
    }

    track_script {
            chk_nginx
   }
}
Create the health-check script (all nodes):
mkdir -p /opt/ha/
[root@master ~]# cat /opt/ha/check_nginx.sh 
#!/bin/bash
# Health check for keepalived: if nginx is gone, kill keepalived so the VIP fails over.
echo "1" >> /var/tmp/keepalived.log
Count1=`netstat -antp | grep -v grep | grep nginx | wc -l`
if [ $Count1 -eq 0 ]; then
        echo "2" >> /var/tmp/keepalived.log
        # re-check once in case nginx was mid-restart
        Count2=`netstat -antp | grep -v grep | grep nginx | wc -l`
        if [ $Count2 -eq 0 ]; then
                echo "nginx pid not found" >> /var/tmp/keepalived.log
           killall keepalived
        else
                echo "nginx came back" >> /var/tmp/keepalived.log
           exit 0
        fi
else
           echo "4" >> /var/tmp/keepalived.log
        exit 0
fi
chmod 755 /opt/ha/check_nginx.sh
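Finally, start keepalived on both nodes and confirm the VIP lands on the master (interface ens32 per the config above):
systemctl start keepalived
ip addr show ens32    # on master: 192.168.10.140 should appear as ens32:1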


Born at the right time; strive for it.