简介

**Kafka架构原理:**https://mp.weixin.qq.com/s/rXDh7VHSbSSwUNhadtRNbA
**Logstash 最佳实践:**https://doc.yonyoucloud.com/doc/logstash-best-practice-cn/index.html


真机部署

环境 centos 7.4 ,ELK 6 ,单节点

服务端:
Logstash 收集,过滤
Elasticsearch 存储,索引日志
Kibana 可视化
客户端:
filebeat 监控、转发,作为agent
filebeat-->Logstash-->Elasticsearch-->Kibana

服务端

安装

基本配置
时间同步;关闭selinux;内核优化;防火墙端口

# Kernel / ulimit tuning for elasticsearch
# fixed: the pam_limits file is /etc/security/limits.conf — the original wrote
# to "limit.conf", which is silently ignored
echo '
* hard nofile 65536
* soft nofile 65536
* soft nproc  65536
* hard nproc  65536
'>>/etc/security/limits.conf

# vm.max_map_count is required by elasticsearch (mmap areas);
# larger listen backlog; enable IP forwarding
echo '
vm.max_map_count = 262144
net.core.somaxconn=65535
net.ipv4.ip_forward = 1
'>>/etc/sysctl.conf
sysctl -p


# Firewall: open Elasticsearch (9200 HTTP, 9300 transport), Logstash beats
# input (5044) and Kibana (5601); brace expansion yields one --add-port per port
firewall-cmd --permanent --add-port={9200,9300,5044,5601}/tcp
firewall-cmd --reload
# fixed: "frewall-cmd" typo in the verification command
firewall-cmd --list-all

# 安装rpm or yum(推荐)
#下载rpm安装
# 可以下载tar或者rpm包安装
# 官网 https://www.elastic.co/downloads
# 中文 https://www.elastic.co/cn/products
# 下载rpm包
https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.2.2.rpm
https://artifacts.elastic.co/downloads/logstash/logstash-6.2.2.rpm
https://artifacts.elastic.co/downloads/kibana/kibana-6.2.2-x86_64.rpm
https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.2.2-x86_64.rpm

# Install via the official elastic yum repository
rpm --import https://artifacts.elastic.co/GPG-KEY-elasticsearch
echo '
[elk-6]
name=elk-6
baseurl=https://artifacts.elastic.co/packages/6.x/yum
gpgcheck=1
gpgkey=https://artifacts.elastic.co/GPG-KEY-elasticsearch
enabled=1
'>/etc/yum.repos.d/elk.repo

# Install the JDK (elasticsearch requires a java runtime) plus the full stack
yum install java-1.8.0-openjdk elasticsearch logstash kibana filebeat -y


配置

elasticsearch配置

# Inspect the packaged config files and keep a backup
rpm -qc elasticsearch
grep -v '^#' /etc/elasticsearch/elasticsearch.yml
cp /etc/elasticsearch/elasticsearch.yml{,.bak}

# Rewrite the config (single-node cluster; replace 本机IP with this host's IP)
echo '
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
cluster.name: ELK
node.name: elk.novalocal
network.host: 0.0.0.0
http.port: 9200
discovery.zen.ping.unicast.hosts: ["本机IP:9300"]
discovery.zen.minimum_master_nodes: 1
'>/etc/elasticsearch/elasticsearch.yml

# Reload systemd units after changing the config
systemctl daemon-reload

# Enable at boot and (re)start
systemctl enable  elasticsearch
systemctl restart elasticsearch

# Check: service state, listening ports (9200/9300), HTTP response
systemctl status elasticsearch
netstat -nltp | grep java
curl -X GET http://localhost:9200

logstash配置
input :数据输入
filter:数据转化,过滤,分析
output:数据输出

# Inspect the packaged config and keep a backup
rpm -qc logstash
egrep -v '^#|^$' /etc/logstash/logstash.yml
cp /etc/logstash/logstash.yml{,.bak}

# Point logstash at the pipeline config directory
echo 'path.config: /etc/logstash/conf.d'>>/etc/logstash/logstash.yml

# Create a pipeline config file
# flow: filebeat -> logstash -> elasticsearch
echo '
input {

 #收集本地log#
  file {
     type => "logtest"
     path => "/var/log/logtest.txt"
     start_position => "beginning"
  }

 #filebeat客户端#
  beats {
     port => 5044
  }

}

 #筛选
 #filter { }

output {

#标准输出,调试使用#
  stdout {
   codec => rubydebug { }
  }

# 输出到es#
  elasticsearch {
    hosts => ["http://本机IP(es所在主机IP):9200"]
    index => "%{type}-%{+YYYY.MM.dd}"
  }

}
'>/etc/logstash/conf.d/logstash-01.conf


# Debug / test the pipeline (optional)
# Validate the config and exit
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-01.conf --config.test_and_exit
# Generate a test log line
echo $(date +"%F-%T") log-test >>/var/log/logtest.txt
# Run in the foreground and watch the emitted events
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-01.conf


# Enable at boot and start as a service
systemctl enable logstash
systemctl restart  logstash

# Check (logstash takes a while to come up, hence the sleep)
sleep 20
systemctl status logstash
netstat -nltp | grep java

kibana配置

# Inspect the packaged config and keep a backup
rpm -qc kibana
cp /etc/kibana/kibana.yml{,.bak}
grep -v '^#' /etc/kibana/kibana.yml.bak

# Rewrite the config (elasticsearch.url must point at an ES node)
echo '
server.port: 5601
server.host: "0.0.0.0"
# ES的url的一个ES节点#
# elasticsearch.url: "http://(ES所在IP):9200"
elasticsearch.url: "http://localhost:9200"
kibana.index: ".kibana"
# kibana.defaultAppId: "home"
'>/etc/kibana/kibana.yml

# Enable at boot and start
systemctl enable  kibana
systemctl restart kibana

# Check: kibana runs on node and listens on 5601
systemctl status kibana
netstat -nltp | grep node

# 防火墙对外开放tcp/5601
# 浏览器访问 ip:5601

浏览器访问kibana设置

首次打开,需要添加索引模式
Management(管理)-->Index Patterns(索引模式)-->Create index pattern(创建索引模式)
填写*(索引名)-->Next step-->选择如:@timestamp-->Create index pattern ,完成

Index pattern 下面填写logstash配置的名称如type => "logs"填写logs


汉化

# Install git if it is missing
# fixed: check via PATH (command -v) instead of the hard-coded /usr/bin/git,
# which misses git installed elsewhere; also guard the cd
command -v git >/dev/null 2>&1 || { echo 'install git';yum install -y git &>/dev/null; }
git clone https://github.com/anbai-inc/Kibana_Hanization.git
cd Kibana_Hanization || exit 1
python main.py /usr/share/kibana

# Restart kibana to pick up the translated resources
systemctl restart kibana

客户端

filebeat配置 (轻量客户端)

yum install -y filebeat

# Inspect the packaged config and keep a backup
rpm -qc filebeat
# fixed: anchor the comment filter to line start ('^#') so settings whose value
# contains a '#' are not hidden (matches the filter used for logstash.yml)
egrep -v '^#|^$' /etc/filebeat/filebeat.yml
cp /etc/filebeat/filebeat.yml{,.bak}

# Example: collect nginx access logs
# Install nginx (only needed for this example; skip for other log sources)
rpm -Uvh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm
yum install -y nginx
systemctl start nginx
curl localhost

# Verify nginx wrote an access-log entry
tail /var/log/nginx/access.log

# Configure filebeat to ship nginx access logs to logstash
# fixed: the prospector option is "enabled" (the original "enable: yes" is not
# a valid filebeat setting and is ignored)
echo '#filebeat#
filebeat.prospectors:
# nginx 设置nginx的日志路径,作为输入源
- input_type: log
  enabled: true
  #tags: nginx-access
  paths:
    - /var/log/nginx/access.log
  exclude_lines: ["^$"]
  fields:
    type: "nginx-access"
  fields_under_root: true

output.logstash:
  hosts: ["localhost:5044"]
  #hosts: ["(填写服务端Logstash所在IP):5044"]
  #index: filebeat
'>/etc/filebeat/filebeat.yml


# Enable at boot, start, and check
systemctl enable  filebeat
systemctl restart filebeat
systemctl status  filebeat

其他(可选)

在kibana查看日志

# logstash使用grok过滤nginx日志
nginx日志有main和log_json两种,默认为main普通文本格式
ELK存储为json格式;将文本格式化,拆分出如ip地址、访问agent等字段,便于后续使用

# nginx默认日志格式
    log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
                      '$status $body_bytes_sent "$http_referer" '
                      '"$http_user_agent" "$http_x_forwarded_for"';

# curl localhost生成日志格式如下
# 127.0.0.1 - - [22/Mar/2018:18:37:37 +0800] "GET / HTTP/1.1" 200 612 "-" "curl/7.29.0" "-"

# logstash配置,使用grok过滤nginx日志
grok使用的正则表达式在grok-patterns文件
可以引用或添加自定义规则
# Locate the grok-patterns file shipped with logstash
# NOTE(review): scanning all of / is slow; the path printed below is the
# typical location for logstash 6.x — verify on the target host
Grok=$(find / -name grok-patterns)
echo $Grok
# /usr/share/logstash/vendor/bundle/jruby/2.3.0/gems/logstash-patterns-core-4.1.2/patterns/grok-patterns

# Create the custom nginx pattern file (reuses built-in grok patterns like IP/HTTPDATE/QS)
echo '#nginx-access
WZ ([^ ]*)
NGINXACCESS %{IP:remote_ip} \- \- \[%{HTTPDATE:timestamp}\] "%{WORD:method} %{WZ:request} HTTP/%{NUMBER:httpversion}" %{NUMBER:status} %{NUMBER:bytes} %{QS:referer} %{QS:agent} %{QS:xforward}
'>/etc/logstash/conf.d/nginx-access

# Regenerate the logstash pipeline config, now with the nginx grok filter
echo '
input {

 #收集本地log#
  file {
     type => "logtest"
     path => "/var/log/logtest.txt"
     start_position => "beginning"
  }

 #filebeat客户端#
  beats {
     port => 5044
  }

}

 # #筛选
filter {

# 如果是nginx访问日志
  if ( [type] == "nginx-access" ) {

    #按字段切割
    grok { 
      patterns_dir=>"/etc/logstash/conf.d/nginx-access"
      match => { "message" => "%{NGINXACCESS}" }
      }

    # 时间格式转换
    date {
      match => [ "timestamp", "dd/MMM/YYYY:HH:mm:ss Z" ]
      }

    # 删除不需要的字段
    mutate { 
      remove_field => [ "offset", "@version", "beat", "input_type", "tags","id"]
      }
    }
}

output {

#标准输出,调试使用#
  stdout {
   codec => rubydebug { }
  }

# 输出到es#
  elasticsearch {
    hosts => ["http://(ES所在IP):9200"]
    index => "%{type}-%{+YYYY.MM.dd}"
  }

}
'>/etc/logstash/conf.d/logstash-01.conf

# Validate the config
/usr/share/logstash/bin/logstash -t -f /etc/logstash/conf.d/logstash-01.conf 

# Debug: stop the service and run logstash in the foreground
systemctl stop  logstash
/usr/share/logstash/bin/logstash -f /etc/logstash/conf.d/logstash-01.conf

# 访问nginx产生日志,在elasticsearch-head或者kibana查看nginx日志。
# logstash配置文件可拆分为多个,按input、filter、output类型+序列号指定优先级

elasticsearch调试工具(可选)

# Install prerequisites for the elasticsearch-head plugin
# Install NodeJS (from the epel repo)
yum install -y nodejs

# Install cnpm (taobao npm mirror)
# NOTE(review): cnpm is installed here but the next command uses plain npm —
# either is fine, just inconsistent
npm install -g cnpm --registry=https://registry.npm.taobao.org

# Install grunt globally
npm install -g grunt

# Install elasticsearch-head
# GitHub: https://github.com/mobz/elasticsearch-head
mkdir -p /opt/head
cd /opt/head || exit 1
# fixed: GitHub no longer serves the unauthenticated git:// protocol — use https
git clone https://github.com/mobz/elasticsearch-head.git
cd elasticsearch-head || exit 1
npm install

# Start in the background (listens on 9100)
npm run start &

# Allow cross-origin requests so head can talk to elasticsearch
echo '#elasticsearch-head
http.cors.enabled: true
http.cors.allow-origin: "*"
'>>/etc/elasticsearch/elasticsearch.yml

# Restart elasticsearch to apply
systemctl restart elasticsearch

# Browse port 9100
http://ip:9100/
# If head shows "not connected", change localhost in /etc/elasticsearch/elasticsearch.yml to the host IP

参考:http://www.cnblogs.com/elvi/p/8654021.html



Docker部署

环境 Centos 7.4 , Docker version 17.12
Docker至少3GB内存

服务端

内核配置

# Kernel settings required by elasticsearch inside the container
# (mmap count) plus a larger listen backlog
echo '
vm.max_map_count = 262144
net.core.somaxconn=65535
'>>/etc/sysctl.conf
sysctl -p

创建elk

# Pull the all-in-one elk image (docs: https://hub.docker.com/r/sebp/elk/)
docker pull sebp/elk

# Create a named volume for elasticsearch data (recommended)
docker volume create elk-es
docker volume ls

# Create the elk container: 5601 kibana, 9200 elasticsearch, 5044 beats input
docker run -dit --name elk \
  -p 5601:5601 -p 9200:9200 -p 5044:5044 \
  -v elk-es:/var/lib/elasticsearch \
  -v /etc/localtime:/etc/localtime \
  sebp/elk 


# -v /etc/localtime:/etc/localtime keeps the container timezone in sync with the host
# memory limits: -e ES_MIN_MEM=1G -e ES_MAX_MEM=3G


# Check the container
docker ps -l  
# Access test
curl localhost:9200
curl localhost:5601
# Browse kibana at ip:5601

# Logstash beats-input config for remote filebeat clients
# pipeline configs live in /etc/logstash/conf.d/
# Comment out logstash's ssl requirement (use a self-signed cert in production)
docker exec -it elk sed -i 's/ssl/#ssl/' /etc/logstash/conf.d/02-beats-input.conf

# Restart the ELK container to apply
docker restart elk

首次打开,需要添加索引
Management(管理)-->Index Patterns(索引模式)-->Create index pattern(创建索引模式)
填写filebeat-* (索引名)-->Next step-->选择如:@timestamp-->Create index pattern ,完成

elk镜像自带nginx日志切割实例文件
/opt/logstash/patterns/nginx
/etc/logstash/conf.d/11-nginx.conf

docker环境 快速使用elasticsearch-head插件:http://www.cnblogs.com/elvi/p/8664941.html


调试、测试(可选)

# Enter the elk container
docker exec -it elk /bin/bash

# Install net-tools (for netstat); the image uses apt
apt install net-tools -y

# List listening ports
netstat -lntp

# Validate logstash config
# NOTE(review): -f expects a pipeline config file; pointing it at logstash.yml
# looks wrong — the image keeps pipelines in /etc/logstash/conf.d/. Verify.
/opt/logstash/bin/logstash -t -f /opt/logstash/config/logstash.yml

# Run logstash in the foreground
service logstash stop
/opt/logstash/bin/logstash -f /opt/logstash/config/logstash.yml

# Enter the filebeat-nginx container
docker exec -it filebeat-nginx /bin/bash

# Debug filebeat: validate config, then run in foreground with publish tracing
systemctl stop filebeat
/usr/share/filebeat/bin/filebeat -configtest -c /etc/filebeat/filebeat.yml
/usr/share/filebeat/bin/filebeat -e -c /etc/filebeat/filebeat.yml -d "publish"

汉化

查看ELK Dockerfile文档得知系统基于ubuntu:16

# Enter the elk container
docker exec -it elk /bin/bash

# Configure a domestic (aliyun) apt mirror — the image is based on ubuntu:16 (xenial)
echo 'deb http://mirrors.aliyun.com/ubuntu/ xenial xenial-updates main universe restricted multiverse'>>/etc/apt/sources.list
echo 'deb-src http://mirrors.aliyun.com/ubuntu/ xenial xenial-updates main restricted multiverse universe'>>/etc/apt/sources.list
apt-get update 

# Install git
apt install git -y
git --version

# Install python (required by the translation script)
apt install python -y
python -V

# Translate the kibana UI to Chinese
cd /opt
git clone https://github.com/anbai-inc/Kibana_Hanization.git
cd Kibana_Hanization
python main.py /opt/kibana

# Restart kibana inside the container
service kibana restart

Ctrl+D快捷键退出容器


客户端

使用centos+nginx+filebeat进行测试

# 下载centos镜像
docker pull centos


# Create the filebeat config that will be baked into the image
# fixed: the prospector option is "enabled" (the original "enable: yes" is not
# a valid filebeat setting and is ignored)
echo '#filebeat#
filebeat.prospectors:
#nginx 设置nginx的日志路径输入filebeat
- input_type: log
  enabled: true
  #tags: nginx-access
  paths:
    - /var/log/nginx/access.log
  exclude_lines: ["^$"]
  fields:
    type: "nginx-access"
  fields_under_root: true
#logstash  IP+端口
output.logstash:
  hosts: ["elk:5044"]
'>filebeat.yml


# Create the Dockerfile for a custom image with filebeat + nginx
# fixed: MAINTAINER/ENV are Dockerfile instructions, not shell commands — they
# cannot be chained with "&& \"; use a LABEL and one ENV per line instead

echo '
FROM centos

LABEL maintainer="Leo <842632422@qq.com>"
ENV TZ="Asia/Shanghai"
ENV TERM=xterm

# use aliyun source,and install #
RUN curl -s http://mirrors.aliyun.com/repo/Centos-7.repo>/etc/yum.repos.d/CentOS-Base.repo && \
 curl -s http://mirrors.aliyun.com/repo/epel-7.repo>/etc/yum.repos.d/epel.repo && \
 sed -i "/aliyuncs.com/d" /etc/yum.repos.d/*.repo && \
 yum install -y net-tools tar && \
 rm -rf  /var/cache/yum/* /tmp/* /var/tmp/* /root/*.cfg

# install filebeat and add yml.
ENV FILEBEAT_VERSION=6.2.3
RUN rpm -Uvh https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-${FILEBEAT_VERSION}-x86_64.rpm && \
 systemctl enable filebeat.service 
ADD filebeat.yml /etc/filebeat/filebeat.yml

# install nginx
RUN rpm -ivh http://nginx.org/packages/centos/7/noarch/RPMS/nginx-release-centos-7-0.el7.ngx.noarch.rpm && \
 yum install nginx -y && \
 systemctl enable nginx.service && \
 yum clean all

EXPOSE 80

ENTRYPOINT ["/usr/sbin/init"]
'>Dockerfile


# Build the filebeat-nginx image
docker build -t filebeat-nginx .

# List images
docker images

# Run the container, linked to the elk container, nginx published on host port 82
docker run --privileged -dit --name filebeat-nginx \
  --link elk -p 82:80  filebeat-nginx
# gotcha: the centos image needs --privileged with /usr/sbin/init as PID 1
# before systemctl can manage services inside the container

# Verify: browse ip:82 for the nginx welcome page
docker ps -l
netstat -lntp |grep 82
docker exec -it filebeat-nginx netstat -lntp
curl localhost:82

其他

使用curl命令操作Elasticsearch索引

# Manage Elasticsearch indices with curl
# List all indices
curl 'localhost:9200/_cat/indices?v'

# Create index test-index
curl -XPUT 'localhost:9200/test-index?pretty'

# Delete the index
curl -XDELETE 'localhost:9200/test-index'

参考:
https://www.cnblogs.com/lovelinux199075/p/9101631.html
http://www.cnblogs.com/elvi/p/8654222.html
Docker ELK文档:http://elk-docker.readthedocs.io
https://github.com/spujadas/elk-docker
https://github.com/spujadas/elk-docker/blob/master/nginx-filebeat/Dockerfile
亿级 ELK 日志平台构建实践:http://blog.51cto.com/13527416/2117141
http://www.cnblogs.com/kevingrace/p/5919021.html



Logstash日志收集

https://mp.weixin.qq.com/s/CrmDAkmoHWaC1uKDML0sDw


Logrotate日志管理

日志切割
http://www.cnblogs.com/kevingrace/p/6307298.html

文章作者: Leo
版权声明: 本站所有文章除特别声明外,均采用 CC BY-NC-SA 4.0 许可协议。转载请注明来自 LeoLan的小站
环境搭建 ELK 日志 Elasticsearch
喜欢就支持一下吧