JMeter Testing
JMeter Installation
JMeter Configuration
- Configure the thread group
  Test Plan -> Add -> Threads -> Thread Group:
  As shown in the figure, 200 users are configured; a Ramp-up Period of 10 means all of these users are started within 10 s, and the loop count is 10 (see the ramp-up sketch after this list).
- Configure the parameter file
  Thread Group -> Add -> Config Element -> CSV Data Set Config
  Configure the corresponding properties (a sample parameter file is shown after this list).
- Configure the HTTP request
  Thread Group -> Add -> Sampler -> HTTP Request
  Configure the corresponding properties (see the request sketch after this list).
- Add a summary report
  Thread Group -> Add -> Listener -> Summary Report
- Add a View Results Tree
  Thread Group -> Add -> Listener -> View Results Tree
  The results tree shows each request sent to the server and the response that came back; this component is useful for debugging requests.
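To make the thread-group settings concrete, here is a minimal Python sketch of what the Ramp-up Period means: with 200 users and a 10 s ramp-up, a new thread is started roughly every 10/200 = 0.05 s, and each thread then repeats its request for the configured loop count. This only illustrates the semantics, not how JMeter is implemented, and the function names are made up.

import threading
import time

def worker(task, loop_count):
    # Each simulated user repeats its request loop_count times.
    for _ in range(loop_count):
        task()

def start_ramped_threads(thread_num, ramp_up_s, loop_count, task):
    # Spread thread start-ups evenly over the ramp-up window,
    # e.g. 200 threads / 10 s -> one new thread every 0.05 s.
    interval = float(ramp_up_s) / thread_num
    threads = []
    for _ in range(thread_num):
        t = threading.Thread(target=worker, args=(task, loop_count))
        t.start()
        threads.append(t)
        time.sleep(interval)
    for t in threads:
        t.join()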
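For the CSV Data Set Config step, the parameter file is simply one record per line. Assuming the Variable Names property is set to keyword (the file name and contents below are made up for illustration), each line can then be referenced as ${keyword} in the HTTP request:

keywords.txt (saved as UTF-8, one search term per line):
手机
笔记本电脑
蓝牙耳机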
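The HTTP Request sampler in this test sends the same kind of Elasticsearch _search call that the Python script later in this post builds with curl. A minimal Python 2 sketch of one such request (the host argument is a placeholder; the customer32 index and strdescription field are taken from that script):

import json
import urllib2

def search_once(host, keyword):
    # Same query shape as the curl command built in deal_once() below:
    # a bool/should query with a match_phrase clause on strdescription.
    body = json.dumps({
        "size": 0,
        "query": {"bool": {"should": [
            {"match_phrase": {"strdescription": keyword}}
        ]}}
    })
    req = urllib2.Request("http://%s:9200/customer32/_search" % host, body,
                          {"Content-Type": "application/json"})
    return json.loads(urllib2.urlopen(req).read())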
Result Analysis
The results are shown in the figure:
Samples: total number of requests executed (one per thread here), 5000.
Average: average response time, 192 ms.
Error: error rate, 0.
Throughput: requests handled per second, 1831.
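For reference, a rough Python sketch of how these summary figures relate to the raw samples (the function and the (elapsed_ms, is_error) layout are illustrative, not JMeter internals):

def summarize(samples, wall_time_s):
    # samples: list of (elapsed_ms, is_error) pairs collected during the test;
    # wall_time_s: total test duration in seconds.
    total = len(samples)                                             # Samples
    average = sum(ms for ms, _ in samples) / float(total)            # Average (ms)
    error_rate = sum(1 for _, err in samples if err) / float(total)  # Error rate
    throughput = total / float(wall_time_s)                          # requests per second
    return total, average, error_rate, throughput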
Problems Encountered
- Encoding issues
  1) Check the encoding of the parameter file; UTF-8 is usually used. If it is a .txt file whose encoding is not UTF-8, open the file, choose Save As, and set the encoding to UTF-8 (a small conversion sketch follows this list).
  2) In CSV Data Set Config, set the file encoding used to parse the file to UTF-8.
  3) In the HTTP Request configuration, set Content encoding to utf-8.
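A small helper for step 1), in case the parameter file was saved in a local encoding such as GBK (the file names and source encoding here are assumptions):

import codecs

def to_utf8(src, dst, src_encoding="gbk"):
    # Re-save a parameter file as UTF-8 so JMeter and the CSV Data Set Config
    # read it consistently.
    with codecs.open(src, "r", src_encoding) as rfile:
        text = rfile.read()
    with codecs.open(dst, "w", "utf-8") as wfile:
        wfile.write(text)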
Python Script Test
# Python 2 load-test script: fires batches of concurrent Elasticsearch
# queries through curl and reports the average "took" time plus the
# achieved throughput.
import sys
import os
import threading
import random
import time
import json


def parse_param():
    # Command-line form: <thread_num> <times> <keyword_file>
    # (kept for reference; deal() below uses hard-coded values instead).
    args = sys.argv
    thread_num = args[1]
    times = args[2]
    keyword_fname = args[3]
    rfile = open(keyword_fname)
    keywords = [line for line in rfile.read().split("\n") if line]
    rfile.close()
    return int(thread_num), int(times), keywords


def get_file_data(fname):
    # Read the keyword file: one query (tab-separated phrases) per line;
    # blank lines such as the trailing newline are dropped.
    rfile = open(fname)
    keywords = [line for line in rfile.read().split("\n") if line]
    rfile.close()
    return keywords


class myThread(threading.Thread):
    def __init__(self, cmd, cost_time):
        self.cmd = cmd
        self.cost_time = cost_time
        threading.Thread.__init__(self)

    def run(self):
        # Run the curl command and record Elasticsearch's "took" time (ms).
        result = os.popen(self.cmd).read()
        result = json.loads(result)
        if "error" in result.keys():
            print "error:%s" % result
            return
        self.cost_time.append(int(result["took"]))


def run(thread_num, times, cmds):
    # Launch `times` batches of `thread_num` concurrent requests; assumes
    # cmds holds at least thread_num * times commands.
    threads = []
    start_time = time.time()
    t = 0
    cost_times = []
    for i in range(0, times):
        for j in range(0, thread_num):
            tmp_thread = myThread(cmds[t], cost_times)
            t = t + 1
            threads.append(tmp_thread)
        for val in threads:
            val.start()
        for val in threads:
            val.join()
        threads[:] = []
    print len(cost_times)
    avg_time = sum(cost_times) / float(len(cost_times))
    output = float(thread_num * times) / (time.time() - start_time)
    return avg_time, output


def clear_cache():
    # Drop the Elasticsearch caches so every round starts cold.
    cmd = "curl 100.110.11.131:9200/_cache/clear"
    result = os.popen(cmd).read()
    print result


def deal_once(thread_num, times, keywords):
    clear_cache()
    ip_list = ["xxx", "xxx", "xxx", "xxx"]
    cmds = []
    for val in keywords:
        # Build a bool/should query with one match_phrase clause per phrase
        # on the line, aimed at a randomly chosen node of the cluster.
        cmd = "curl %s:9200/customer32/_search -d '{\"size\":0,\"query\":{\"bool\":{\"should\":[" % random.sample(ip_list, 1)[0]
        keyword = val.split("\t")
        for key in keyword:
            tmp = "{\"match_phrase\":{\"strdescription\":\"%s\"}}," % key
            cmd = cmd + tmp
        cmd = cmd[:-1] + "]}}}'"
        cmds.append(cmd)
    print cmds[0]
    avg_time, output = run(thread_num, times, cmds)
    print "avg_time:%f" % avg_time
    print "output/s:%f" % output


def deal():
    # Edit these lists to benchmark other concurrency levels or keyword files.
    thread_num = [5000]
    times = 1
    files = ["11_prefix.txt"]
    for file in files:
        for num in thread_num:
            print "thread_num:%d\t file is %s" % (num, file)
            keywords = get_file_data(file)
            deal_once(num, times, keywords)
            time.sleep(1)


deal()
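Note on running the script: deal() is called at the bottom of the file, so it runs with a plain python invocation as long as the keyword files are in the working directory. The thread counts and keyword file names are hard-coded in deal(); parse_param() is only kept for a command-line variant (thread_num, times, keyword file) that is not wired up here. The script prints the average Elasticsearch "took" time in milliseconds and the achieved requests per second.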