1. Resource testing
CPU stress test
stress --cpu 1 --timeout 600
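While the stress run is active, you can watch its effect from a second terminal; a minimal sketch using standard procps/sysstat tools:
# Highlight load-average changes as they happen
watch -d uptime
# Per-CPU utilization, refreshed every 5 seconds
mpstat -P ALL 5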
I/O stress test
stress -i 1 --timeout 600
# On CentOS, stress -i (which only stresses sync()) may fail to generate real I/O pressure; use stress-ng instead
stress-ng -i 1 --hdd 1 --timeout 600
Process stress test
stress -c 8 --timeout 600
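For both the I/O and the process runs, the pressure shows up in the device stats and the run queue; a quick way to watch (sysstat assumed installed):
# Extended per-device I/O statistics every second (watch %util and await)
iostat -d -x 1
# r column = run queue length, wa column = iowait
vmstat 1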
Context-switch stress test
# (newer sysbench releases spell the option --time instead of --max-time)
sysbench --threads=10 --max-time=300 threads run
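To see the context-switch pressure this generates, a minimal sketch:
# cs column shows context switches per second, in shows interrupts
vmstat 1
# Per-process voluntary (cswch/s) and involuntary (nvcswch/s) switches
pidstat -w 1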
Read/write test (fio)
# See also (bcc tracing tools, for deeper I/O analysis): https://github.com/iovisor/bcc
# Random read
fio -name=randread -direct=1 -iodepth=64 -rw=randread -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/sdb
# Random write
fio -name=randwrite -direct=1 -iodepth=64 -rw=randwrite -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/sdb
# Sequential read
fio -name=read -direct=1 -iodepth=64 -rw=read -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/sdb
# Sequential write
fio -name=write -direct=1 -iodepth=64 -rw=write -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/dev/sdb
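Note that -filename=/dev/sdb writes to the raw block device and destroys any data on it; when that is too risky, fio can target an ordinary file instead (the path below is an assumption):
# Same random-write test against a scratch file rather than a raw disk
fio -name=randwrite -direct=1 -iodepth=64 -rw=randwrite -ioengine=libaio -bs=4k -size=1G -numjobs=1 -runtime=1000 -group_reporting -filename=/tmp/fio-test.bin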
2. Network testing
Forwarding performance (pktgen)
# Load the kernel module
modprobe pktgen
# Then run the shell script below
# Helper function for setting the various test options
function pgset() {
    local result
    echo $1 > $PGDEV
    result=`cat $PGDEV | fgrep "Result: OK:"`
    if [ "$result" = "" ]; then
        cat $PGDEV | fgrep Result:
    fi
}
# Bind eth0 to kernel thread 0
PGDEV=/proc/net/pktgen/kpktgend_0
pgset "rem_device_all"   # clear all device bindings
pgset "add_device eth0"  # add eth0
# Configure the test options for eth0
PGDEV=/proc/net/pktgen/eth0
pgset "count 1000000"    # total number of packets to send
pgset "delay 5000"       # delay between packets (nanoseconds)
pgset "clone_skb 0"      # SKB clone count (0 = no cloning)
pgset "pkt_size 64"      # packet size (bytes)
pgset "dst 192.168.0.30" # destination IP
pgset "dst_mac 11:11:11:11:11:11" # destination MAC
# Start the test
PGDEV=/proc/net/pktgen/pgctrl
pgset "start"
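Writing start blocks until all count packets have been sent; to abort a run early, pgctrl also accepts stop:
# Abort a running pktgen test
echo "stop" > /proc/net/pktgen/pgctrl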
# Check the result
$ cat /proc/net/pktgen/eth0
Params: count 1000000 min_pkt_size: 64 max_pkt_size: 64
frags: 0 delay: 5000 clone_skb: 0 ifname: eth0
flows: 0 flowlen: 0
queue_map_min: 0 queue_map_max: 0
dst_min: 192.168.0.1 dst_max:
src_min: src_max:
src_mac: 08:00:27:5d:27:25 dst_mac: 00:00:00:00:00:00
udp_src_min: 9 udp_src_max: 9 udp_dst_min: 9 udp_dst_max: 9
src_mac_count: 0 dst_mac_count: 0
Flags:
Current:
pkts-sofar: 54841 errors: 0
started: 5169978645us stopped: 5172387908us idle: 8624us
seq_num: 54842 cur_dst_mac_offset: 0 cur_src_mac_offset: 0
cur_saddr: 10.0.2.15 cur_daddr: 192.168.0.1
cur_udp_dst: 9 cur_udp_src: 9
cur_queue_map: 0
flows: 0
Result: OK: 2409262(c2400638+d8624) usec, 54841 (64byte,0frags)
22762pps 11Mb/sec (11654144bps) errors: 0
The key figures are on the Result line: about 22k pps and a throughput of 11 Mb/s.
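As a cross-check, the receiving host (192.168.0.30 in this test) should see a matching inbound packet rate; a minimal sketch with sar:
# On the target host: rxpck/s should be close to the sender's pps
sar -n DEV 1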
TCP/UDP test (iperf3)
yum install iperf3
# Start the server
iperf3 -s -i 1 -p 10000
# Run the client on another machine
iperf3 -c 192.168.0.30 -b 1G -t 15 -P 2 -p 10000
Result:
[ ID] Interval Transfer Bandwidth Retr
[ 4] 0.00-15.00 sec 54.8 MBytes 30.6 Mbits/sec 0 sender
[ 4] 0.00-15.00 sec 54.4 MBytes 30.4 Mbits/sec receiver
[ 6] 0.00-15.00 sec 86.5 MBytes 48.4 Mbits/sec 0 sender
[ 6] 0.00-15.00 sec 85.9 MBytes 48.1 Mbits/sec receiver
[SUM] 0.00-15.00 sec 141 MBytes 79.0 Mbits/sec 0 sender
[SUM] 0.00-15.00 sec 140 MBytes 78.5 Mbits/sec receiver
The SUM lines show 140 MBytes transferred over the 15 s run, i.e. a throughput of about 78.5 Mbits/sec on the receiver side.
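The run above exercises TCP only; switching to UDP needs -u on the client (the -b rate cap below is an assumption):
# UDP variant: -u selects UDP, -b caps the sending rate at 100 Mbits/sec
iperf3 -c 192.168.0.30 -u -b 100M -t 15 -p 10000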
HTTP load test (ab)
# -c sets the number of concurrent requests to 1000, -n sets the total number of requests to 10000
$ ab -c 1000 -n 10000 http://192.168.0.30/
...
Server Software: nginx/1.15.8
Server Hostname: 192.168.0.30
Server Port: 80
...
Requests per second: 1078.54 [#/sec] (mean)
Time per request: 927.183 [ms] (mean)
Time per request: 0.927 [ms] (mean, across all concurrent requests)
Transfer rate: 890.00 [Kbytes/sec] received
Connection Times (ms)
min mean[+/-sd] median max
Connect: 0 27 152.1 1 1038
Processing: 9 207 843.0 22 9242
Waiting: 8 207 843.0 22 9242
Total: 15 233 857.7 23 9268
Percentage of the requests served within a certain time (ms)
50% 23
66% 24
75% 24
80% 26
90% 274
95% 1195
98% 2335
99% 4663
100% 9268 (longest request)
Transfer rate is the throughput in bytes per second: 890 KB/s here.
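On CentOS, ab ships in the httpd-tools package; re-running with keep-alive (-k) reuses connections and usually shifts the numbers, so it is worth comparing:
yum install httpd-tools
# Same test with HTTP keep-alive enabled
ab -k -c 1000 -n 10000 http://192.168.0.30/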
HTTP load test (wrk)
# -c sets the number of concurrent connections to 1000, -t sets the number of threads to 2
$ wrk -c 1000 -t 2 http://192.168.0.30/
Running 10s test @ http://192.168.0.30/
2 threads and 1000 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 65.83ms 174.06ms 1.99s 95.85%
Req/Sec 4.87k 628.73 6.78k 69.00%
96954 requests in 10.06s, 78.59MB read
Socket errors: connect 0, read 0, write 0, timeout 179
Requests/sec: 9641.31
Transfer/sec: 7.82MB
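wrk defaults to a 10-second run; a longer test with --latency gives steadier numbers plus a percentile breakdown (the 60 s duration is an arbitrary choice):
# 60-second run; --latency prints a latency distribution in the report
wrk -c 1000 -t 2 -d 60 --latency http://192.168.0.30/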