准备文件
elasticsearch https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-5.3.1.tar.gz
logstash https://artifacts.elastic.co/downloads/logstash/logstash-5.3.1.tar.gz
kibana https://artifacts.elastic.co/downloads/kibana/kibana-5.3.1-linux-x86_64.tar.gz
[root@localhost]# vi /etc/profile    # 在文件末尾追加以下内容
JAVA_HOME=/usr/local/java/jre1.8.0_171
JRE_HOME=/usr/local/java/jre1.8.0_171
PATH=$JAVA_HOME/bin:$JRE_HOME/bin:$PATH
CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar:$JRE_HOME/lib:$CLASSPATH
export JAVA_HOME JRE_HOME PATH CLASSPATH
# 开启网卡
[root@localhost]# vi /etc/sysconfig/network-scripts/ifcfg-enoxxx    # 将 ONBOOT=no 改为 ONBOOT=yes
[root@localhost]# service network restart
[root@localhost]# source /etc/profile
[root@localhost]# tar -zxvf /usr/local/download/elasticsearch-5.3.1.tar.gz -C /usr/local/
elasticsearch配置详解:http://www.cnblogs.com/skyblue/p/5216536.html
修改配置文件:[root@localhost]# vi /usr/local/elasticsearch-5.3.1/config/elasticsearch.yml
cluster.name: skynet_es_cluster    # 集群名称,ES 会按照此集群名称进行集群发现
node.name: skynet_es_cluster_dev1  # 节点名
path.data: /data/elk/data          # 数据目录
path.logs: /data/elk/logs          # 日志目录
network.host: 0.0.0.0              # 允许所有设备访问
http.port: 9200                    # 默认的端口号
discovery.zen.ping.unicast.hosts: ["skynet_es_cluster_dev1", "skynet_es_cluster_dev2"]    # 集群节点列表,也可以写成 ip 地址
discovery.zen.minimum_master_nodes: 3    # 可工作的最少主节点个数,应为 总节点数/2+1(防止脑裂);注意此值须与实际节点数匹配
http.cors.enabled: true            # 支持跨域,保证 _site 类的插件可以访问 es
http.cors.allow-origin: "*"        # 跨域访问允许的域名地址
bootstrap.memory_lock: false       # 虚拟机内存锁定设置为 false
bootstrap.system_call_filter: false    # 关闭系统调用过滤器(旧内核不支持 seccomp,否则启动报错)
[root@localhost]# vi /etc/sysctl.conf    # 增加参数:vm.max_map_count=655360
[root@localhost]# sysctl -p              # 执行,使配置生效
[root@localhost]# vi /etc/security/limits.conf    # 在文件末尾追加
* soft nofile 65536
* hard nofile 131072
* soft nproc 65536
* hard nproc 131072
[root@localhost]# vi /etc/security/limits.d/20-nproc.conf    # 追加一行:elk soft nproc 65536,设置 elk 用户参数
[root@localhost]# groupadd elk
[root@localhost]# useradd elk -g elk
[root@localhost]# chown -R elk:elk /usr/local/
[root@localhost elasticsearch-5.3.1]# su elk    # 注意:ES 5.x 不允许以 root 用户启动,需切换到 elk 用户
[elk@localhost elasticsearch-5.3.1]$ bin/elasticsearch -d    # 后台启动
1、解压
[root@localhost]# tar -zxvf /usr/local/src/logstash-5.3.1.tar.gz -C /usr/local/    # 解压源码包
2、运行测试
[root@localhost logstash-5.3.1]# /usr/local/logstash-5.3.1/bin/logstash -e 'input { stdin { } } output { stdout {} }'    # 运行命令测试
结果如下
[root@localhost logstash-5.3.1]# /usr/local/logstash-5.3.1/bin/logstash -e 'input { stdin { } } output { stdout { codec => rubydebug } }'
Sending Logstash's logs to /usr/local/logstash-5.3.1/logs which is now configured via log4j2.properties
[2018-04-27T22:49:59,008][INFO ][logstash.pipeline ] Starting pipeline {"id"=>"main", "pipeline.workers"=>1, "pipeline.batch.size"=>125, "pipeline.batch.delay"=>5, "pipeline.max_inflight"=>125}
[2018-04-27T22:49:59,124][INFO ][logstash.pipeline ] Pipeline main started
The stdin plugin is now waiting for input:
[2018-04-27T22:49:59,255][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
# 此处进行日志输入,测试结果会即时显示
3、创建配置文件
[root@localhost logstash-5.3.1]# vi config/logstash-simple.conf    # 创建配置文件,内容如下
input { stdin { } }
output { stdout { codec => rubydebug } }
# 这是最简单的配置文件:只包含输入输出管道,未指定 elasticsearch,也未指定输入输出格式与过滤
#####附(服务器配置详解):
input {    # 输入管道
  file {
    path => ["/springcloud/log/*", "/springcloud/log/back_stage_management_web/*"]    # 采集路径
    type => "log"
    codec => multiline {
      pattern => "^\["      # 匹配以 [ 开头的行
      negate => true        # 为 true 表示不匹配 pattern 时执行 what 动作
      what => "previous"    # 将该行与上一条日志合并
    }    # 多行合并条件,理论上应写在 filter 管道,据说有可能影响性能,所以放在输入管道
    start_position => "beginning"
  }
}
filter {
  grok {    # 匹配函数
    match => {
      "message" => "%{COMBINEDAPACHELOG} %{LOGLEVEL:level} %{GREEDYDATA:msg}"    # logstash 默认的过滤
    }
    remove_field => "message"
  }
}
output {
  elasticsearch {
    hosts => ["192.168.1.220:9200"]          # 指定 elasticsearch 地址
    index => "providerlog-%{+YYYY.MM.dd}"    # 用于 elasticsearch 区分服务器的索引,也用于检索日志(索引名不能包含 [ ] 等非法字符)
    action => "index"
    document_type => "springcloud_logs"
  }
}
4、启动测试
[root@localhost logstash-5.3.1]#/usr/local/logstash-5.3.1/bin/logstash -f /usr/local/logstash-5.3.1/config/logstash-simple.conf
5、开启服务,执行如下命令:
[root@localhost logstash-5.3.1]#/usr/local/logstash-5.3.1/bin/logstash -f /usr/local/logstash-5.3.1/config/logstash-simple.conf & # &表示后台运行
6、我们可以使用 curl 命令发送请求来查看 ES 是否接收到了数据:
[root@localhost logstash-5.3.1]# curl "http://<elasticsearch ip>:9200"
1.解压资源包
[root@localhost kibana-5.3.1]# tar -zxvf /usr/local/src/kibana-5.3.1-linux-x86_64.tar.gz -C /usr/local/
2.配置kibana
[root@localhost kibana-5.3.1]# vi /usr/local/kibana-5.3.1-linux-x86_64/config/kibana.yml    # 编辑 kibana.yml 配置文件,增加以下参数:
server.port: 5601    # 开启默认端口 5601
server.host: "kibana服务器ip"    # 站点地址
elasticsearch.url: "http://elasticsearch服务器ip:9200"    # 指向 elasticsearch 服务的地址
kibana.index: ".kibana"    # kibana 在 elasticsearch 中使用的索引
3.启动 执行以下命令启动:
[root@localhost kibana-5.3.1]#/usr/local/kibana-5.3.1-linux-x86_64/bin/kibana &
4.测试浏览器访问 访问:http://kibana服务器ip:5601
#### 执行 bin/kibana & 命令后,不要用 ctrl+c 退出(会杀掉 kibana 进程),
#### 而是使用 exit 退出 shell;这样即使关闭了 shell 窗口,kibana 服务也不会挂掉。