Linux - CentOS

Network Configuration

##Switch the IP to a static address
$ vi /etc/sysconfig/network-scripts/ifcfg-eno16777736

##Bring the interface up at boot
ONBOOT=yes
##Change DHCP to static addressing
BOOTPROTO=static
##Static IP address
IPADDR=192.168.8.128
##Gateway
GATEWAY=192.168.8.1
##Subnet mask
NETMASK=255.255.255.0

##Configure DNS
$ vi /etc/NetworkManager/NetworkManager.conf

[main]
plugins=ifcfg-rh
dns=none

$ vi /etc/resolv.conf

nameserver 114.114.114.114
nameserver 8.8.8.8

##Change the hostname
$ hostnamectl set-hostname linux128
$ cat /etc/hostname

##Map hostnames to IP addresses (effectively a local DNS)
$ vi /etc/hosts

127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6

192.168.8.128 linux128
192.168.8.129 linux129
192.168.8.130 linux130

##Restart the network service
$ systemctl restart network
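
##Alternatively, a sketch using nmcli (assumes the interface is managed by NetworkManager and its connection profile shares the device name):
$ nmcli connection modify eno16777736 ipv4.method manual ipv4.addresses 192.168.8.128/24 ipv4.gateway 192.168.8.1 ipv4.dns "114.114.114.114 8.8.8.8"
$ nmcli connection up eno16777736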

##Start, stop, enable at boot, disable at boot, or check the status of the firewall
$ systemctl {start|stop|enable|disable|status} firewalld.service
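
##If firewalld stays enabled, the service ports used later in this guide must be opened. A sketch (the ports below match the services installed later; adjust to what is actually deployed):
$ firewall-cmd --permanent --add-port=2181/tcp --add-port=3306/tcp --add-port=6379/tcp --add-port=5672/tcp --add-port=15672/tcp --add-port=80/tcp
$ firewall-cmd --reload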

##Show the current default target
$ systemctl get-default
##Boot into the graphical interface by default
$ systemctl set-default graphical.target
##Boot into the command line by default
$ systemctl set-default multi-user.target

##Reboot, power off, suspend, or hibernate
$ systemctl {reboot|poweroff|suspend|hibernate}

Common Software Installation

##List all installed packages
$ yum list installed
##Update all installed packages
$ yum upgrade

##Show the current time
$ date "+%Y-%m-%d %H:%M:%S"
##Sync the clock from an NTP server
$ yum -y install ntpdate
$ ntpdate ntp1.aliyun.com

##Install file transfer tools (scp is provided by openssh-clients)
$ yum -y install curl wget openssh-clients

##Install Nmap
$ yum install nmap
##Check open ports
$ nmap 127.0.0.1

##Install the GCC compiler
$ yum list | grep gcc
$ yum install gcc
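
##A minimal sketch to confirm the toolchain works (hello.c is just a throwaway test file):
$ cat > hello.c <<'EOF'
#include <stdio.h>
int main(void) { printf("hello, gcc\n"); return 0; }
EOF
$ gcc hello.c -o hello && ./hello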

Development Software Installation

Zookeeper

$ scp -o port=22 zookeeper-3.3.6.tar.gz root@192.168.8.128:/opt/zookeeper-3.3.6.tar.gz
$ tar -zxvf zookeeper-3.3.6.tar.gz
$ mkdir /usr/local/zookeeper
$ mv zookeeper-3.3.6 /usr/local/zookeeper
$ cd /usr/local/zookeeper/zookeeper-3.3.6
$ mkdir data logs

##Edit the configuration
$ cp conf/zoo_sample.cfg conf/zoo.cfg
$ vi conf/zoo.cfg

##Directory for snapshots and the myid file
dataDir=/usr/local/zookeeper/zookeeper-3.3.6/data
dataLogDir=/usr/local/zookeeper/zookeeper-3.3.6/logs
clientPort=2181
##Format: server.<server id>=<server address>:<quorum port>:<leader election port>
server.1=192.168.8.128:2888:3888
server.2=192.168.8.129:2888:3888
server.3=192.168.8.130:2888:3888

##Create the myid file and write this server's id into it
$ touch data/myid
##server.1
$ echo 1 > data/myid
##server.2
$ echo 2 > data/myid
##server.3
$ echo 3 > data/myid

##Start and check the running status
$ ./bin/zkServer.sh {start|restart|status|stop}
$ cat zookeeper.out
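
##A quick sanity check with the bundled CLI (a sketch; assumes the ensemble above is up and reachable)
$ ./bin/zkCli.sh -server 192.168.8.128:2181
##inside the zkCli shell: create a znode, read it back (repeat the get against another server to confirm replication), then leave
create /test hello
get /test
quit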

##Start at boot (systemd unit)
$ vi /etc/systemd/system/zookeeper.service

[Unit]
Description=zooKeeper
After=syslog.target network.target

[Service]
Type=forking
SyslogIdentifier=zookeeper
TimeoutStartSec=10min
Environment="JAVA_HOME=/usr/local/jdk/jdk1.8.0_77"
ExecStart=/usr/local/zookeeper/zookeeper-3.3.6/bin/zkServer.sh start
ExecStop=/usr/local/zookeeper/zookeeper-3.3.6/bin/zkServer.sh stop
ExecReload=/usr/local/zookeeper/zookeeper-3.3.6/bin/zkServer.sh restart

[Install]
WantedBy=multi-user.target

##Reload systemd so the new unit takes effect
$ systemctl daemon-reload
##Start, stop, enable at boot, disable at boot, or check the status
$ systemctl {start|stop|enable|disable|status} zookeeper.service

Redis

$ wget http://download.redis.io/releases/redis-5.0.0.tar.gz
$ tar -zxvf redis-5.0.0.tar.gz
$ mkdir /usr/local/redis
$ cd redis-5.0.0
##Install dependencies
$ yum install tcl
##Build and install (without PREFIX the binaries are installed to /usr/local/bin by default)
$ make && make PREFIX=/usr/local/redis/redis-5.0.0 install
##Edit the configuration
$ cp redis.conf /usr/local/redis/redis-5.0.0
$ cd /usr/local/redis/redis-5.0.0
$ vi redis.conf

##Allow remote access (by default Redis only accepts connections from the local machine)
# bind 127.0.0.1
##Allow other hosts to connect to this Redis instance
protected-mode no
##Run as a daemon in the background
daemonize yes
##Enable cluster mode
cluster-enabled yes

##Start and test
$ ./bin/redis-server redis.conf
$ ./bin/redis-cli -h 127.0.0.1 -p 6379

##Install the Ruby environment
$ yum install ruby
##Install the Ruby Redis client
$ wget https://rubygems.org/downloads/redis-3.2.1.gem
$ gem install redis-3.2.1.gem
##Run the redis-trib.rb script to create the cluster
$ cp /opt/redis-3.2.1/src/redis-trib.rb /usr/local/redis/redis-5.0.0
$ ./redis-trib.rb create --replicas 0 192.168.8.128:6379 192.168.8.129:6379 192.168.8.130:6379
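
##Note: in Redis 5.0 redis-trib.rb is deprecated; the same cluster can be created directly with redis-cli (a sketch):
$ ./bin/redis-cli --cluster create 192.168.8.128:6379 192.168.8.129:6379 192.168.8.130:6379 --cluster-replicas 0
##Verify the cluster state
$ ./bin/redis-cli -h 192.168.8.128 -p 6379 cluster info
$ ./bin/redis-cli -h 192.168.8.128 -p 6379 cluster nodes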

MySQL

##Check whether MariaDB is installed (and remove it to avoid conflicts)
$ rpm -qa | grep mariadb
mariadb-libs-5.5.50-1.el7_2.x86_64
$ rpm -e --nodeps mariadb-libs-5.5.50-1.el7_2.x86_64

$ tar -xvf mysql-8.0.15-linux-glibc2.12-x86_64.tar.xz
$ mv mysql-8.0.15-linux-glibc2.12-x86_64 /usr/local/mysql
$ cd /usr/local/mysql

$ vi my.cnf

# This will be passed to all mysql clients
[client]
##Default port used by MySQL clients when connecting to the server
port=3306
##Default character set for MySQL clients
default_character_set=utf8mb4

# The MySQL server
[mysqld]
##Port used by the server
port=3306
##Maximum number of simultaneous connections
max_connections=10000
##Maximum number of failed connection attempts per host
max_connect_errors=10
##Maximum allowed packet size
max_allowed_packet=64M
##Character set used by the server
character_set_server=utf8mb4
##Default storage engine for new tables
default_storage_engine=INNODB
##Transaction isolation level
#transaction_isolation=REPEATABLE-READ
##Default authentication plugin (password hashing method)
default_authentication_plugin=mysql_native_password
##MySQL installation directory
basedir=/usr/local/mysql
##MySQL data directory
datadir=/usr/local/mysql/data

##Add the mysql group
$ groupadd mysql
##Add the mysql user (-s /bin/false means the account owns files but cannot log in)
$ useradd -r -g mysql -s /bin/false mysql
##Change the owner of the installation directory
$ chown -R mysql:mysql ./

##Initialize the database (--user=mysql keeps the generated data files owned by the mysql user when running as root)
$ ./bin/mysqld --initialize --console --user=mysql

##The randomly generated temporary password (used for the first login below)
[Server] A temporary password is generated for root@localhost: J&el*7Q37h?Z

##Permission digits: owner (u), group (g), others (o); read (r=4), write (w=2), execute (x=1)
$ chmod -R 777 /usr/local/mysql/data
##Start the MySQL service
$ ./support-files/mysql.server {start|stop|restart|reload|force-reload|status}

$ ./bin/mysql -uroot -p

##Change the root user's password
mysql> ALTER USER 'root'@'localhost' IDENTIFIED WITH mysql_native_password BY 'root';
##Allow remote login
mysql> use mysql;
mysql> UPDATE user SET host = '%' WHERE user = 'root';
mysql> flush privileges;
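
##A hedged alternative sketch: rather than exposing root remotely, create a dedicated application account (user, password, and database names below are placeholders)
mysql> CREATE USER 'app'@'%' IDENTIFIED BY 'app_password';
mysql> GRANT ALL PRIVILEGES ON appdb.* TO 'app'@'%';
mysql> flush privileges;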

##Restart the MySQL service
$ ./support-files/mysql.server restart

##Start at boot (systemd unit)
$ vi /etc/systemd/system/mysql.service

[Unit]
Description=mysql
Documentation=http://dev.mysql.com/doc/refman/en/using-systemd.html
After=network.target remote-fs.target nss-lookup.target

[Service]
User=mysql
Group=mysql
Type=forking
ExecStart=/usr/local/mysql/support-files/mysql.server start
ExecStop=/usr/local/mysql/support-files/mysql.server stop
ExecReload=/usr/local/mysql/support-files/mysql.server reload

[Install]
WantedBy=multi-user.target

##Reload systemd so the new unit takes effect
$ systemctl daemon-reload
##Start, stop, enable at boot, disable at boot, or check the status
$ systemctl {start|stop|enable|disable|status} mysql.service

Master-Slave Replication

$ vi my.cnf

##Master (192.168.8.129) configuration

[mysqld]
log_bin=mysql-bin
server_id=1

##Restart the service
$ ./support-files/mysql.server restart
$ ./bin/mysql -uroot -p

##Create the replication account and grant privileges
mysql> CREATE USER 'repl'@'192.168.8.130' IDENTIFIED BY 'repl';
mysql> GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.8.130';
mysql> flush privileges;
mysql> show master status;
+------------------+----------+--------------+------------------+-------------------+
| File             | Position | Binlog_Do_DB | Binlog_Ignore_DB | Executed_Gtid_Set |
+------------------+----------+--------------+------------------+-------------------+
| mysql-bin.000001 |     1679 |              |                  |                   |
+------------------+----------+--------------+------------------+-------------------+
mysql> SHOW VARIABLES LIKE 'server_id';

##Slave (192.168.8.130) configuration

[mysqld]
log_bin=mysql-bin
server_id=2

##Restart the service
$ ./support-files/mysql.server restart
$ ./bin/mysql -uroot -p

##Point the replica at the master
mysql> CHANGE MASTER TO master_host='192.168.8.129', master_port=3306, master_user='repl', master_password='repl', master_log_file='mysql-bin.000001', master_log_pos=1679;
##Start the slave replication threads
mysql> start slave;
##Check the slave status
mysql> show slave status\G
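
##A minimal verification sketch (the database/table names are placeholders): once Slave_IO_Running and Slave_SQL_Running both show Yes, data written on the master should appear on the slave
mysql> CREATE DATABASE repl_test;                        -- on the master
mysql> CREATE TABLE repl_test.t1 (id INT PRIMARY KEY);
mysql> INSERT INTO repl_test.t1 VALUES (1);
mysql> SELECT * FROM repl_test.t1;                       -- on the slave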

Master-Master Replication

$ vi my.cnf

##Master1 (192.168.8.129) configuration

[mysqld]
log_bin=mysql-bin
server_id=1
##Enable global transaction IDs (GTID)
gtid_mode=ON
##Enforce GTID-consistent transactions
enforce_gtid_consistency=true
##Databases to replicate
#binlog_do_db=test
##Databases to skip
#binlog_ignore_db=mysql
replicate_wild_ignore_table=mysql.%
replicate_wild_ignore_table=information_schema.%
##Generate odd auto-increment values (offset: starting value, increment: step size)
auto_increment_offset=1
##Note: this scheme is hard to extend when new servers join; consider a Snowflake-style ID generator instead of auto-increment IDs
auto_increment_increment=2

##Restart the service
$ ./support-files/mysql.server restart
$ ./bin/mysql -uroot -p

##Create the replication account and grant privileges
mysql> CREATE USER 'repl'@'192.168.8.130' IDENTIFIED BY 'repl';
mysql> GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.8.130';
mysql> flush privileges;
##Point at the other master (master_auto_position={0|1} toggles GTID-based positioning)
mysql> CHANGE MASTER TO master_host='192.168.8.130', master_port=3306, master_user='repl', master_password='repl', master_auto_position=1;
##Start the slave replication threads
mysql> start slave;
##Check the slave status
mysql> show slave status\G

##Master2 (192.168.8.130) configuration

[mysqld]
log_bin=mysql-bin
server_id=2
##Enable global transaction IDs (GTID)
gtid_mode=ON
##Enforce GTID-consistent transactions
enforce_gtid_consistency=true
##Databases to replicate
#binlog_do_db=test
##Databases to skip
#binlog_ignore_db=mysql
replicate_wild_ignore_table=mysql.%
replicate_wild_ignore_table=information_schema.%
##Generate even auto-increment values (offset: starting value, increment: step size)
auto_increment_offset=2
##Note: this scheme is hard to extend when new servers join; consider a Snowflake-style ID generator instead of auto-increment IDs
auto_increment_increment=2

##Restart the service
$ ./support-files/mysql.server restart
$ ./bin/mysql -uroot -p

##Create the replication account and grant privileges
mysql> CREATE USER 'repl'@'192.168.8.129' IDENTIFIED BY 'repl';
mysql> GRANT REPLICATION SLAVE ON *.* TO 'repl'@'192.168.8.129';
mysql> flush privileges;
##Point at the other master (master_auto_position={0|1} toggles GTID-based positioning)
mysql> CHANGE MASTER TO master_host='192.168.8.129', master_port=3306, master_user='repl', master_password='repl', master_auto_position=1;
##Start the slave replication threads
mysql> start slave;
##Check the slave status
mysql> show slave status\G
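
##A short sketch showing the effect of the offset/increment settings (database/table names are placeholders): inserts on Master1 get ids 1, 3, 5, ... and inserts on Master2 get ids 2, 4, 6, ..., so the two masters never collide
mysql> CREATE DATABASE IF NOT EXISTS test;
mysql> CREATE TABLE test.seq (id INT AUTO_INCREMENT PRIMARY KEY, src VARCHAR(10));
mysql> INSERT INTO test.seq (src) VALUES ('m1');     -- run on Master1
mysql> INSERT INTO test.seq (src) VALUES ('m2');     -- run on Master2
mysql> SELECT * FROM test.seq;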

Keepalived

Keepalived is a high-performance server high-availability (hot-standby) solution. It is mainly used to eliminate single points of failure; combined with a reverse-proxy load balancer such as Nginx or HAProxy, it provides high availability for web backends.

$ wget https://www.keepalived.org/software/keepalived-2.0.17.tar.gz
$ tar -zxvf keepalived-2.0.17.tar.gz
$ cd keepalived-2.0.17
##Install dependencies
$ yum -y install openssl-devel
##Configure the installation path
$ ./configure --prefix=/usr/local/keepalived
##Build and install
$ make && make install

$ cd /usr/local/keepalived
$ cp etc/keepalived/keepalived.conf keepalived.conf
$ vi keepalived.conf

! Configuration File for keepalived

vrrp_instance VI_1 {
    #Initial state is MASTER (once other nodes appear, the one with the highest priority is elected MASTER)
    state MASTER
    #Network interface
    interface eno16777736
    #Priority
    priority 100
    #Interval between VRRP advertisements (i.e. how often a MASTER election takes place)
    advert_int 1
    #Virtual router ID (must be the same within a cluster)
    virtual_router_id 50
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    #Virtual IP (VIP) address
    virtual_ipaddress {
        192.168.8.188
    }
}

virtual_server 192.168.8.188 3306 {
    #Health-check interval in seconds
    delay_loop 6
    #Scheduling algorithm
    lb_algo rr
    #LVS forwarding method
    lb_kind DR
    #Session persistence timeout in seconds
    persistence_timeout 50
    #Forwarding protocol
    protocol TCP

    real_server 192.168.8.129 3306 {
        #Weight of this real server
        weight 1
        TCP_CHECK {
            #Connection timeout
            connect_timeout 3
            #Delay before retrying
            delay_before_retry 3
        }
    }
}


##Start the service (by default Keepalived reads /etc/keepalived/keepalived.conf)
$ ./sbin/keepalived -D -f /usr/local/keepalived/keepalived.conf
##Check that the VIP is bound to verify the setup
$ ip addr

inet 192.168.8.129/24 brd 192.168.8.255 scope global eno16777736
inet 192.168.8.188/32 scope global eno16777736

##Start at boot (systemd unit)
$ vi /etc/systemd/system/keepalived.service

[Unit]
Description=keepalived
After=network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
PIDFile=/var/run/keepalived.pid
ExecStart=/usr/local/keepalived/sbin/keepalived -D -f /usr/local/keepalived/keepalived.conf
ExecStop=/bin/kill -s QUIT $MAINPID
ExecReload=/bin/kill -s HUP $MAINPID

[Install]
WantedBy=multi-user.target

##Reload systemd so the new unit takes effect
$ systemctl daemon-reload
##Start, stop, enable at boot, disable at boot, or check the status
$ systemctl {start|stop|enable|disable|status} keepalived.service

MySQL High Availability

The basic idea behind high availability with Keepalived: whether a node plays the MASTER or BACKUP role is decided jointly by priority and weight. For example, suppose the MASTER node starts with priority 100 and the BACKUP node with priority 90; each node runs a script that monitors its own service, and when the service is detected as unhealthy, the weight is subtracted from that node's priority. The node with the highest resulting priority is then elected MASTER. To guarantee that a script failure actually triggers a MASTER/BACKUP switch, the weight is usually set larger than the priority difference between the MASTER and BACKUP nodes.
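
As a worked example using the values in the configuration below (MASTER priority 100, BACKUP priority 95, weight -10): while both services are healthy, 100 > 95 and the VIP stays on the MASTER; when MySQL dies on the MASTER, its effective priority drops to 100 - 10 = 90, which is below 95, so the BACKUP wins the next election and takes over the VIP.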

$ vi keepalived.conf

##Master (192.168.8.129) configuration

! Configuration File for keepalived

global_defs {
    script_user mysql
    enable_script_security
}

#Script that monitors the mysql service
vrrp_script check_mysql {
    #Inline script or script file
    script /usr/local/keepalived/check_mysql.sh
    #Interval between script runs (default 1 second)
    interval 1
    #Adjust the priority by this weight (when mysql is down this node loses 10, so the node with priority 95 becomes MASTER)
    weight -10
}

vrrp_instance VI_1 {
    #Initial state is MASTER (once other nodes appear, the one with the highest priority is elected MASTER)
    state MASTER
    #Network interface
    interface eno16777736
    #Priority
    priority 100
    advert_int 1
    virtual_router_id 50
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.8.188
    }
    #Service health-check script
    track_script {
        check_mysql
    }
}

virtual_server 192.168.8.188 3306 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.8.129 3306 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            delay_before_retry 3
        }
    }
}

##Backup (192.168.8.130) configuration

! Configuration File for keepalived

global_defs {
    script_user mysql
    enable_script_security
}

#Script that monitors the mysql service
vrrp_script check_mysql {
    #Inline script or script file
    script /usr/local/keepalived/check_mysql.sh
    #Interval between script runs (default 1 second)
    interval 1
    #Adjust the priority by this weight (when mysql is down this node loses 10, so the node with priority 95 becomes MASTER)
    weight -10
}

vrrp_instance VI_1 {
    #Initial state is BACKUP (the node with the highest priority is elected MASTER)
    state BACKUP
    #Network interface
    interface ens33
    #Priority
    priority 95
    advert_int 1
    virtual_router_id 50
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    virtual_ipaddress {
        192.168.8.188
    }
    #Service health-check script
    track_script {
        check_mysql
    }
}

virtual_server 192.168.8.188 3306 {
    delay_loop 6
    lb_algo rr
    lb_kind DR
    persistence_timeout 50
    protocol TCP

    real_server 192.168.8.130 3306 {
        weight 1
        TCP_CHECK {
            connect_timeout 3
            delay_before_retry 3
        }
    }
}

##Health-check script

$ vi check_mysql.sh

#!/bin/bash
counter=$(ps aux | grep mysql | grep -v grep | wc -l)
if [ "$counter" -eq 0 ]
then
    echo "Failure: MySQL is not running."
    exit 1
else
    echo "Success: MySQL is running."
    exit 0
fi

$ chown -R mysql:mysql ./
$ chmod 755 check_mysql.sh

##By default Keepalived logs to the system log
$ tail -50f /var/log/messages
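
##A minimal failover test sketch: stop MySQL on the Master and watch the VIP move to the Backup
$ ./support-files/mysql.server stop            ##on 192.168.8.129
$ ip addr show ens33 | grep 192.168.8.188      ##on 192.168.8.130: the VIP should now be bound here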

RabbitMQ

##Install the Erlang runtime
$ wget https://github.com/rabbitmq/erlang-rpm/releases/download/v21.3.8.5/erlang-21.3.8.5-1.el7.x86_64.rpm
##Install dependencies
$ yum -y install openssl-devel
##Check whether the package is relocatable (i.e. whether --prefix=<dir> can be added to install to a custom directory)
$ rpm -qpi erlang-21.3.8.5-1.el7.x86_64.rpm | head
$ rpm -i erlang-21.3.8.5-1.el7.x86_64.rpm

##Install RabbitMQ
$ wget https://github.com/rabbitmq/rabbitmq-server/releases/download/v3.7.15/rabbitmq-server-generic-unix-3.7.15.tar.xz
$ tar -xvf rabbitmq-server-generic-unix-3.7.15.tar.xz
$ mv rabbitmq_server-3.7.15 /usr/local/rabbitmq
$ cd /usr/local/rabbitmq

##Add the configuration file
$ vi etc/rabbitmq/rabbitmq.conf

## See https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq.conf.example

## Allow access to the guest user from anywhere on the network.
loopback_users.guest = false

$ vi etc/rabbitmq/rabbitmq-env.conf

HOME=/usr/local/rabbitmq

# Specifies new style config file location
#CONFIG_FILE=/usr/local/rabbitmq/etc/rabbitmq/rabbitmq.conf

##Enable plugins
$ ./sbin/rabbitmq-plugins enable rabbitmq_management rabbitmq_web_stomp rabbitmq_stomp

##Start the service
$ ./sbin/rabbitmq-server

##Start at boot (systemd unit)
$ vi /etc/systemd/system/rabbitmq-server.service

[Unit]
Description=rabbitmq-server
Documentation=https://github.com/rabbitmq/rabbitmq-server/blob/master/docs/rabbitmq-server.service.example
After=network.target

[Service]
Type=forking
Environment="HOME=/usr/local/rabbitmq"
ExecStart=/usr/local/rabbitmq/sbin/rabbitmq-server
ExecStop=/usr/local/rabbitmq/sbin/rabbitmqctl stop

[Install]
WantedBy=multi-user.target

##Reload systemd so the new unit takes effect
$ systemctl daemon-reload
##Start, stop, enable at boot, disable at boot, or check the status
$ systemctl {start|stop|enable|disable|status} rabbitmq-server.service
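
##Optional sketch: create a dedicated administrator for the management UI instead of relying on guest (user name and password are placeholders)
$ ./sbin/rabbitmqctl add_user admin admin123
$ ./sbin/rabbitmqctl set_user_tags admin administrator
$ ./sbin/rabbitmqctl set_permissions -p / admin ".*" ".*" ".*"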

RabbitMQ Cluster

In the default cluster mode, every node holds the same metadata (the queue structure), but the message bodies live on only one node; if that node fails, the other nodes cannot retrieve the messages that have not yet been consumed.

##A RabbitMQ cluster relies on an Erlang cluster, and Erlang nodes must share the same cookie to be able to communicate

##Copy the .erlang.cookie file from the Master node to each of the other nodes
$ scp .erlang.cookie root@192.168.8.129:/usr/local/rabbitmq/.erlang.cookie
$ scp .erlang.cookie root@192.168.8.130:/usr/local/rabbitmq/.erlang.cookie

##Start each node in the cluster
$ ./sbin/rabbitmq-server -detached

##Check the cluster status on the Master node
$ ./sbin/rabbitmqctl cluster_status

##Join each of the other nodes to the Master node to form the cluster
$ ./sbin/rabbitmqctl stop_app
$ ./sbin/rabbitmqctl join_cluster rabbit@linux128
$ ./sbin/rabbitmqctl start_app

##Set the cluster name
$ ./sbin/rabbitmqctl set_cluster_name rabbitmq-cluster

RabbitMQ Mirrored Queues

RabbitMQ's mirrored queue mechanism is the simplest queue HA solution. Building on the cluster, it adds policy options such as ha-mode and ha-params so that queues can be mirrored to multiple nodes as needed, achieving high availability and removing the single point of failure that queue contents have in plain cluster mode. Unlike the normal mode, message bodies are actively synchronized between mirror nodes rather than pulled on demand when a client fetches data. The downsides are equally clear: besides lower performance, when many queues are mirrored and a large volume of messages flows in, the cluster's internal network bandwidth is heavily consumed by this synchronization traffic, so the mode is generally suited to scenarios with high reliability requirements.

##Mirror every queue in the cluster
$ ./sbin/rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all"}'
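
##A more selective sketch, assuming only queues whose names start with "two." should be mirrored to exactly two nodes and synchronized automatically:
$ ./sbin/rabbitmqctl set_policy ha-two "^two\." '{"ha-mode":"exactly","ha-params":2,"ha-sync-mode":"automatic"}'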

HAProxy

HAProxy is a reliable, high-performance solution that provides high availability, load balancing, and proxying for TCP- and HTTP-based applications, similar to load balancing with Nginx (whose load balancing is primarily HTTP-oriented). Load balancing is the fundamental answer to performance, single points of failure (high availability), and scalability (horizontal scaling).

$ wget https://www.haproxy.org/download/2.0/src/haproxy-2.0.1.tar.gz
$ tar -zxvf haproxy-2.0.1.tar.gz
$ cd haproxy-2.0.1

##Check the Linux kernel version
$ uname -r

3.10.0-327.36.1.el7.x86_64

##Build (HAProxy 2.0 uses TARGET=linux-glibc for modern Linux kernels, replacing the older kernel-specific targets)
$ make TARGET=linux-glibc ARCH=x86_64

##Install
$ make PREFIX=/usr/local/haproxy install

RabbitMQ Load Balancing

$ vi /usr/local/haproxy/haproxy.cfg

global
    daemon
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    maxconn 4096
    chroot /usr/local/haproxy
    pidfile /usr/local/haproxy/haproxy.pid

defaults
    log global
    retries 3
    maxconn 2000
    timeout connect 5s
    timeout client 50s
    timeout server 50s

listen rabbitmq_cluster
    bind 127.0.0.1:5671
    mode tcp
    balance roundrobin
    server linux128 192.168.8.128:5672 check inter 5000 rise 2 fall 2
    server linux129 192.168.8.129:5672 check inter 5000 rise 2 fall 2
    server linux130 192.168.8.130:5672 check inter 5000 rise 2 fall 2

listen monitor
    bind 192.168.8.131:8100
    mode http
    stats enable
    stats uri /rabbitmq
    stats refresh 5s

##Check that the configuration file is valid
$ ./sbin/haproxy -f haproxy.cfg -c
##Start the service
$ ./sbin/haproxy -f haproxy.cfg
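
##A quick check that the frontend is listening and the stats page responds (addresses and ports taken from the config above):
$ ss -tlnp | grep haproxy
$ curl http://192.168.8.131:8100/rabbitmq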

Nginx

$ wget http://nginx.org/download/nginx-1.18.0.tar.gz
$ tar -zxvf nginx-1.18.0.tar.gz
$ mkdir /usr/local/nginx
$ cd nginx-1.18.0
##Install dependencies
$ yum install pcre pcre-devel
$ yum install zlib zlib-devel
##Configure the installation path
$ ./configure --prefix=/usr/local/nginx/nginx-1.18.0
##Build and install
$ make && make install
##Start and test
$ cd /usr/local/nginx/nginx-1.18.0/
$ ./sbin/nginx
$ curl http://127.0.0.1

##Start at boot (systemd unit)
$ vi /etc/systemd/system/nginx.service

[Unit]
Description=nginx
After=network.target remote-fs.target nss-lookup.target

[Service]
Type=forking
ExecStart=/usr/local/nginx/nginx-1.18.0/sbin/nginx
ExecStop=/usr/local/nginx/nginx-1.18.0/sbin/nginx -s stop
ExecReload=/usr/local/nginx/nginx-1.18.0/sbin/nginx -s reload

[Install]
WantedBy=multi-user.target

##Reload systemd so the new unit takes effect
$ systemctl daemon-reload
##Start, stop, enable at boot, disable at boot, or check the status
$ systemctl {start|stop|enable|disable|status} nginx.service

Nginx High Availability

$ vi keepalived.conf

##Master (192.168.8.129) configuration

! Configuration File for keepalived

global_defs {
    script_user nginx
    enable_script_security
}

#Script that monitors the nginx service
vrrp_script check_nginx {
    #Inline script or script file
    script /usr/local/keepalived/check_nginx.sh
    #Interval between script runs (default 1 second)
    interval 1
    #Adjust the priority by this weight (when nginx is down this node loses 10, so the node with priority 95 becomes MASTER)
    weight -10
}

vrrp_instance VI_1 {
    #Initial state is MASTER (once other nodes appear, the one with the highest priority is elected MASTER)
    state MASTER
    #Network interface
    interface eno16777736
    #Priority (the node with the highest priority is elected MASTER)
    priority 100
    #Interval between VRRP advertisements (i.e. how often a MASTER election takes place)
    advert_int 1
    #Virtual router ID (must be the same within a cluster)
    virtual_router_id 50
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    #Virtual IP (VIP) address
    virtual_ipaddress {
        192.168.8.188
    }
    #Service health-check script
    track_script {
        check_nginx
    }
}

##Backup (192.168.8.130) configuration

! Configuration File for keepalived

global_defs {
    script_user nginx
    enable_script_security
}

#Script that monitors the nginx service
vrrp_script check_nginx {
    #Inline script or script file
    script /usr/local/keepalived/check_nginx.sh
    #Interval between script runs (default 1 second)
    interval 1
    #Adjust the priority by this weight (when nginx is down this node loses 10, so the node with priority 95 becomes MASTER)
    weight -10
}

vrrp_instance VI_1 {
    #Set to BACKUP; the priorities decide which node becomes MASTER and which stays standby
    state BACKUP
    #Network interface
    interface ens33
    #Priority (the node with the highest priority is elected MASTER)
    priority 95
    #Interval between VRRP advertisements (i.e. how often a MASTER election takes place)
    advert_int 1
    #Virtual router ID (must be the same within a cluster)
    virtual_router_id 50
    authentication {
        auth_type PASS
        auth_pass 1111
    }
    #Virtual IP (VIP) address
    virtual_ipaddress {
        192.168.8.188
    }
    #Service health-check script
    track_script {
        check_nginx
    }
}

##Health-check script

$ vi check_nginx.sh

#!/bin/bash
counter=$(ps -C nginx --no-heading | wc -l)
if [ "$counter" -eq 0 ]
then
    echo "Failure: Nginx is not running..."
    exit 2
else
    echo "Success: Nginx is running..."
    exit 0
fi

Nginx Load Balancing

$ vi nginx.conf

worker_processes 1;

events {
    worker_connections 1024;
}

http {

    include mime.types;
    default_type application/octet-stream;

    sendfile on;

    keepalive_timeout 65;

    upstream tomcat_cluster {
        #weight: the higher the value, the larger the share of requests the server receives
        #max_fails: how many failed attempts are allowed before the server is marked unavailable (default 1)
        #fail_timeout: how long the server stays marked unavailable once max_fails is exceeded (default 10 seconds)
        server 192.168.8.129:8080 weight=5 max_fails=1 fail_timeout=10;
        server 192.168.8.130:8080 weight=1 max_fails=1 fail_timeout=10;
        #Backup server (only used when all primary servers are unavailable)
        server 192.168.8.128:8080 backup;
    }

    server {
        listen 80;
        server_name localhost;

        location / {
            root html;
            index index.html index.htm;
        }

        location ~ \.xhtml$ {
            proxy_pass http://tomcat_cluster;
            proxy_redirect off;
            proxy_set_header Host $http_host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_connect_timeout 60s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
        }

        error_page 500 502 503 504 /50x.html;
        location = /50x.html {
            root html;
        }
    }

}

$ systemctl reload nginx
$ curl http://192.168.8.188/examples/websocket/index.xhtml
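
##A rough way to observe the weighted distribution (a sketch; assumes each Tomcat serves that page and returns something node-specific):
$ for i in $(seq 1 12); do curl -s -o /dev/null -w "%{http_code}\n" http://192.168.8.188/examples/websocket/index.xhtml; done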