[root@3b9d411b-0a16-4bc4-acd8-10f71b997c29 etc]# ss -s&&free -g
Total: 102008 (kernel 102040)
TCP: 102003 (estab 101812, closed 56, orphaned 77, synrecv 0, timewait 0/0), ports 30060
Transport Total IP IPv6
* 102040 - -
RAW 0 0 0
UDP 4 3 1
TCP 101947 101946 1
INET 101951 101949 2
FRAG 0 0 0
total used free shared buffers cached
Mem: 7 2 5 0 0 0
-/+ buffers/cache: 2 5
Swap: 0 0 0
First, look at the output above:
The modest CPU and memory usage is probably because all connections are plain HTTP rather than SSL, so the computational load is small; it may also be that vegeta was not generating enough pressure. We will refine the load-testing method later; either way, this HAProxy clearly has more headroom to exploit.
Below is the configuration needed to reach this scale of traffic on a single HAProxy machine, together with the detailed kernel tuning.
The component under test is HAProxy 1.6. In production it runs on a 4-core/8 GB machine, and all connections are currently non-SSL.
The test has two goals:
First, raise the production HAProxy connection count from a meager 2,000 to 10,000.
Second, probe the upper limit of requests per second and of maximum concurrent connections for a single HAProxy instance.
Goal one comes from the business need to improve and tune HAProxy performance; goal two lets us deploy the minimum number of HAProxy machines in production.
Tools
Multiple client machines are used to drive the HAProxy load test (ab and vegeta, both shown below).
Machines
HAProxy 1.6 under test: 4 cores, 8 GB RAM, CentOS 6.9.
Backend servers: five machines, each 4 cores/4 GB, CentOS 6, each running five Tomcat instances.
Kernel parameters on the HAProxy machine (/etc/sysctl.conf) — the stock CentOS defaults followed by our additions:
net.ipv4.ip_forward = 0
# Controls source route verification
net.ipv4.conf.default.rp_filter = 1
# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0
# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0
# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1
# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1
# Controls the default maximum size of a message queue
kernel.msgmnb = 65536
# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536
# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736
# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296
net.ipv4.conf.default.promote_secondaries = 1
net.ipv4.ip_forward = 1
net.ipv4.conf.default.rp_filter = 2
net.ipv4.conf.all.rp_filter = 2
net.ipv4.conf.all.send_redirects = 1
net.ipv4.conf.default.send_redirects = 1
### Enable TCP syncookies; syncookies protect a listening socket from overload when too many connection attempts arrive
net.ipv4.tcp_syncookies = 1
net.ipv4.tcp_tw_reuse = 1
net.ipv4.tcp_tw_recycle = 1
net.ipv4.tcp_fin_timeout = 30
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_keepalive_probes = 5
net.ipv4.tcp_keepalive_intvl = 15
net.ipv4.ip_local_port_range = 20000 65000
net.ipv4.tcp_max_syn_backlog = 40960
net.ipv4.tcp_max_tw_buckets = 819200
net.core.somaxconn = 262144
net.core.netdev_max_backlog = 262144
net.ipv4.tcp_max_orphans = 262144
net.ipv4.tcp_orphan_retries = 3
net.ipv4.tcp_synack_retries = 3
net.ipv4.tcp_syn_retries = 3
net.ipv4.tcp_sack = 1
net.ipv4.tcp_fack = 1
net.ipv4.tcp_dsack = 1
net.ipv4.icmp_echo_ignore_broadcasts = 1
net.ipv4.icmp_ignore_bogus_error_responses = 1
### Maximum number of file handles allowed system-wide
fs.file-max = 65535
### Maximum number of file handles a single process may open
fs.nr_open = 65535
kernel.pid_max = 65536
net.ipv4.tcp_rmem = 4096 87380 8388608
net.ipv4.tcp_wmem = 4096 87380 8388608
net.core.rmem_max = 8388608
net.core.wmem_max = 8388608
### note: this key already appears above as 262144; since later entries win when sysctl -p runs, 5000 is the effective value
net.core.netdev_max_backlog = 5000
net.ipv4.tcp_window_scaling = 1
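After editing /etc/sysctl.conf the values still have to be loaded; a minimal check using standard sysctl invocations:

sysctl -p                                    # apply everything in /etc/sysctl.conf
sysctl fs.file-max fs.nr_open                # verify the file-handle ceilings
sysctl net.ipv4.ip_local_port_range          # verify the ephemeral port range

The per-user limits below belong in /etc/security/limits.conf: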
* soft nproc 65535
* hard nproc 65535
* soft nofile 65535
* hard nofile 65535
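These limits only take effect for new login sessions. A quick sanity check (the pgrep pattern assumes the process is named haproxy):

ulimit -n                                                  # in a fresh shell: should print 65535
cat /proc/$(pgrep -o haproxy)/limits | grep 'open files'   # limits of the running process

Next comes the HAProxy configuration itself: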
global
group root
user root
daemon
nbproc 1
log 127.0.0.1 local3
pidfile /var/run/haproxy/pid/10.163.162.65_80.pid
ulimit-n 1000000 ## raise the open-file limit here (note: setrlimit cannot exceed fs.nr_open, so with the sysctl above the effective ceiling is 65535)
max-spread-checks 1000ms
maxconn 30000000 ## lift the connection cap here
maxconnrate 30000000
maxsessrate 30000000
maxsslconn 30000000
maxsslrate 30000000
tune.ssl.default-dh-param 2048
spread-checks 20
stats timeout 5000ms
stats maxconn 50
stats socket /var/run/haproxy/sock/10.163.162.65_80.sock mode 600 level admin process 1
defaults
mode http
maxconn 30000000 ## best kept in line with the global value; otherwise the frontend falls back to the default limit of 2000, which took a long time to track down
option abortonclose
option redispatch
option forwardfor
balance roundrobin
log 127.0.0.1 local3 err
retries 3
option clitcpka
option srvtcpka
listen 10.163.162.65:80
balance roundrobin
bind 10.163.162.65:80
option tcp-check
option httplog
#option dontlognull
timeout http-keep-alive 10s
timeout http-request 10s
errorfile 400 /usr/local/haproxy/errorfile/400.html
errorfile 403 /usr/local/haproxy/errorfile/403.html
errorfile 408 /usr/local/haproxy/errorfile/408.html
errorfile 500 /usr/local/haproxy/errorfile/500.html
errorfile 502 /usr/local/haproxy/errorfile/502.html
errorfile 503 /usr/local/haproxy/errorfile/503.html
errorfile 504 /usr/local/haproxy/errorfile/504.html
timeout connect 100000
timeout queue 100000
timeout client 100000
timeout server 100000
timeout check 100000
cookie SERVERID insert indirect nocache
## backend servers
server 172.16.100.110:8480 172.16.100.110:8480 cookie 1852060044 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.110:8180 172.16.100.110:8180 cookie 1852051288 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.110:8080 172.16.100.110:8080 cookie 1852051260 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.110:8280 172.16.100.110:8280 cookie 1852059892 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.110:8380 172.16.100.110:8380 cookie 1852059664 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.162:8480 172.16.100.162:8480 cookie 2852060044 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.162:8180 172.16.100.162:8180 cookie 2852051288 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.162:8080 172.16.100.162:8080 cookie 2852051260 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.162:8280 172.16.100.162:8280 cookie 2852059892 check inter 30000 rise 3 fall 3 weight 22
server 172.16.100.162:8380 172.16.100.162:8380 cookie 2852059664 check inter 30000 rise 3 fall 3 weight 22
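Since the global section above opens an admin socket, the effective limits of the running instance can be confirmed through it (assuming socat is installed; "show info" is a standard HAProxy runtime command):

echo "show info" | socat stdio /var/run/haproxy/sock/10.163.162.65_80.sock | grep -E 'Maxconn|Maxsock|Ulimit-n|CurrConns'

With the configuration in place, the first attempt was ab from a single client machine, driven by this script: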
#!/bin/bash
# Run ab in a loop (10,000 iterations): each run opens 10,000 concurrent
# connections and sends 100,000 requests at the test URL.
for i in $(seq 1 10000)
do
    ab -c 10000 -n 100000 http://10.163.162.103/index.html
done
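Note that ab needs one file descriptor per concurrent connection, so at -c 10000 the client shell's own nofile limit must be raised first (e.g. ulimit -n 65535), or ab will fail opening sockets.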
Load-test results
Total: 15694 (kernel 15725)
TCP: 16529 (estab 15077, closed 748, orphaned 170, synrecv 0, timewait 747/0), ports 6077
Transport Total IP IPv6
* 15725 - -
RAW 0 0 0
UDP 4 3 1
TCP 15781 15780 1
INET 15785 15783 2
FRAG 0 0 0
total used free shared buffers cached
Mem: 7 1 5 0 0 0
-/+ buffers/cache: 1 5
Swap: 0 0 0
The ab client consumed a lot of resources itself, and the load could not be pushed any higher.
So we switched to vegeta running on three client machines:
echo "POST http://10.163.162.65" | vegeta -cpus=4 attack -duration=10m -header="sleep:1000" -rate=2000 -workers=500 | tee reports.bin | vegeta report
A quick look at the vegeta options used (see the multi-client sketch after this list):
1. -cpus=4 sets how many cores the client uses. We tuned the attacking machines so they could deliver the required load; looking closely at the results, the actual request load is not heavy — the tuning mainly exists to sustain a large number of connections held open while the backend sleeps.
2. -duration=10m does what the name suggests; with no duration given, the attack runs forever.
3. -rate=2000 is the number of requests per second.
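A minimal sketch of launching the same attack from the three clients at once (hostnames client1..client3 are hypothetical; together this yields 3 x 2000 = 6000 requests per second):

for h in client1 client2 client3; do
    # same vegeta pipeline as above, started in parallel on each client
    ssh "$h" 'echo "POST http://10.163.162.65" | vegeta -cpus=4 attack -duration=10m -header="sleep:1000" -rate=2000 -workers=500 > reports.bin' &
done
wait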
Load-test results
Total: 188713 (kernel 189041)
TCP: 188652 (estab 162234, closed 22, orphaned 0, synrecv 0, timewait 21/0), ports 40061
Transport Total IP IPv6
* 189041 - -
RAW 0 0 0
UDP 4 3 1
TCP 188630 188629 1
INET 188634 188632 2
FRAG 0 0 0
total used free shared buffers cached
Mem: 7 2 4 0 0 0
-/+ buffers/cache: 2 4
Swap: 0 0 0
We comfortably reached 160,000 established connections, and the clients were nowhere near exhausted; with better-provisioned clients, 200,000 looks well within reach.
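To cross-check the figure reported by ss -s, established sockets can also be counted directly on the HAProxy machine (standard ss state filtering; subtract one for the header line):

ss -tn state established | wc -l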
The main tuning points, in summary:
Kernel: the file-handle ceilings (fs.file-max, fs.nr_open, the nofile limits) plus the TCP backlog and port-range settings.
In global: ulimit-n and maxconn.
In defaults: maxconn, which must be raised in step with global or the 2000 default applies.
Overall the results met expectations. The original bottleneck turned out to be badly chosen kernel and HAProxy configuration parameters, and picking the right load-testing tool also matters. There is room for further tuning, for example binding nbproc worker processes to CPUs and digging deeper into vegeta's features.
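As a sketch of that nbproc idea (valid HAProxy 1.6 syntax, but the process/CPU mapping below is illustrative and was not used in this test):

global
    nbproc 4
    cpu-map 1 0    # pin worker process 1 to CPU core 0
    cpu-map 2 1
    cpu-map 3 2
    cpu-map 4 3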
Original article: https://www.cnblogs.com/ExMan/p/12190060.html