When load testing with JMeter, you need to watch not only the current QPS but also the request logs.
The following describes two ways to collect JMeter request logs, plus an alternative at the end.
The first approach uses a BeanShell Assertion to parse each sample's request/response data and append it to a local file.
Code snippet:
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.util.Date;
import java.text.SimpleDateFormat;
import com.alibaba.fastjson.JSON;
import com.alibaba.fastjson.JSONObject;

// Timestamp for this sample (24-hour format).
Date dNow = new Date();
SimpleDateFormat ft = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
String requestTime = ft.format(dNow);

// ResponseData is the raw byte[]; Response is the SampleResult, which can render it as a string.
String responseDataAsString = Response.getResponseDataAsString();

// Log the BeanShell Assertion built-in variables for debugging.
log.info("request time: " + requestTime);
log.info("SampleLabel: " + SampleLabel);
log.info("SamplerData: " + SamplerData);
log.info("RequestHeaders: " + RequestHeaders);
log.info("ResponseHeaders: " + ResponseHeaders);
log.info("ResponseMessage: " + ResponseMessage);
log.info("isSuccessful: " + Response.isSuccessful());
log.info("ResponseCode: " + ResponseCode);
log.info("responseDataAsString: " + responseDataAsString);

// Build one JSON record per sample.
JSONObject jsonObject = new JSONObject();
jsonObject.put("request_time", requestTime);
jsonObject.put("url", SamplerData);
jsonObject.put("request_code", ResponseCode);
jsonObject.put("response_data", responseDataAsString);
String jsonString = JSON.toJSONString(jsonObject);
log.info("jsonString: " + jsonString);

// Append the record to a local file, one JSON object per line.
String filePath = "/Users/xinxi/Documents/jmeter/apache-jmeter-5.0/bin/jmeter_file.log";
FileWriter writer = null;
try {
    File file = new File(filePath);
    if (!file.exists()) {
        file.createNewFile();
    }
    writer = new FileWriter(filePath, true);
    writer.write(jsonString + "\r\n");
} catch (IOException e) {
    e.printStackTrace();
} finally {
    if (writer != null) {
        try {
            writer.close();
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
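Each sample appends one JSON object per line, so jmeter_file.log ends up looking roughly like this (the values are purely illustrative):
{"request_time":"2019-06-01 10:20:30","url":"GET http://example.com/api/login","request_code":"200","response_data":"{\"status\":0}"}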
The second approach stores and visualizes the data with the ELK stack. First start an all-in-one ELK container with Docker:
docker run \
--name elk \
-d \
-e LOGSTASH_START=0 \
-p 5601:5601 \
-p 9200:9200 \
-p 5044:5044 \
sebp/elk
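Once the container is up, Elasticsearch should answer on port 9200 and Kibana on 5601. A quick check (assuming the container runs on 192.168.143.242, the host used in the configs below):
curl 'http://192.168.143.242:9200'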
On Linux, Elasticsearch inside the container may fail to start because the vm.max_map_count kernel limit is too low.
Fix:
[root@localhost ~]# sysctl -w vm.max_map_count=262144
[root@localhost ~]# sysctl -a|grep vm.max_map_count
vm.max_map_count = 262144
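Note that sysctl -w only lasts until the next reboot; to make the change persistent you can also append it to /etc/sysctl.conf and reload (standard sysctl usage, not specific to ELK):
[root@localhost ~]# echo "vm.max_map_count=262144" >> /etc/sysctl.conf
[root@localhost ~]# sysctl -p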
Next, create the Logstash config (config/request.conf); it tails jmeter_file.log, parses each line as JSON, and ships the events to Elasticsearch:
input {
  file {
    path => "/Users/xinxi/Documents/jmeter/apache-jmeter-5.0/bin/jmeter_file.log"
    start_position => "beginning"
    codec => "json"
  }
}
filter {
  date {
    # Use the request_time field written by the BeanShell script as the event timestamp.
    match => ["request_time", "yyyy-MM-dd HH:mm:ss"]
    timezone => "Asia/Shanghai"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.143.242:9200"]
    index => "requests-json-%{+YYYY.MM.dd}"
  }
  stdout {
    codec => rubydebug
  }
}
Start the Logstash service; it will continuously pick up new lines from jmeter_file.log and send them to Elasticsearch:
bin/logstash -f config/request.conf
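To confirm that data is flowing, list the indices on the Elasticsearch node used above; a requests-json-YYYY.MM.dd index should appear, which you can then add as an index pattern in Kibana:
curl 'http://192.168.143.242:9200/_cat/indices?v'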
Compared with Logstash, Filebeat is much lighter on machine resources. An example filebeat_jmeter.yml, which collects JMeter's own jmeter.log and uses a multiline pattern so that lines without a leading timestamp (e.g. stack traces) are merged into the previous event:
#=========================== Filebeat inputs =============================
filebeat.inputs:
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /Users/xinxi/Documents/jmeter/apache-jmeter-5.0/bin/jmeter.log
  # Lines that do not start with a date are appended to the previous event.
  multiline.pattern: ^[0-9]{4}-[0-9]{2}-[0-9]{2}
  multiline.negate: true
  multiline.match: after

#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  #reload.period: 10s

#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 1
  #index.codec: best_compression
  #_source.enabled: false

#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify an additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  #host: "localhost:5601"
  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:

#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.

#-------------------------- Elasticsearch output ------------------------------
output.elasticsearch:
  # Array of hosts to connect to.
  hosts: ["192.168.143.242:9200"]
  # Optional protocol and basic auth credentials.
  #protocol: "https"
  #username: "elastic"
  #password: "changeme"

#----------------------------- Logstash output --------------------------------
#output.logstash:
  # The Logstash hosts
  #hosts: ["localhost:5044"]
  # Optional SSL. By default is off.
  # List of root certificates for HTTPS server verifications
  #ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
  # Certificate for SSL client authentication
  #ssl.certificate: "/etc/pki/client/cert.pem"
  # Client Certificate Key
  #ssl.key: "/etc/pki/client/cert.key"

#================================ Processors =====================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
Start Filebeat with this config:
./filebeat -e -c filebeat_jmeter.yml
Another option is to use an Elasticsearch backend listener directly inside JMeter.
Download: https://github.com/xinxi1990/perf-es.git
Build it with mvn package and drop the resulting jar into JMeter's lib/ext directory.
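A minimal sketch of those steps (the exact jar name under target/ depends on the project's pom, and the JMeter path is the one used earlier in this article):
git clone https://github.com/xinxi1990/perf-es.git
cd perf-es
mvn package
cp target/*.jar /Users/xinxi/Documents/jmeter/apache-jmeter-5.0/lib/ext/
After restarting JMeter, the listener should then be selectable as a Backend Listener implementation.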
References:
JMeter BeanShell Assertion: using the built-in variables
https://www.jianshu.com/p/66587703551c
Performance monitoring: a lightweight logging solution for JMeter distributed load testing
https://mp.weixin.qq.com/s?__biz=MzIwNDY3MDg1OA==&mid=2247484507&idx=1&sn=545ddf373a11d2380192f44edacbca09&chksm=973dd6bea04a5fa8667db03b3e6fe21cb214e12eda9e9d6570207e9559e862a3c49c7a05ba78&scene=21#wechat_redirect