Author:殷帅鹏
Creation Date:2021-12-08
Update Notes:
- 2022-02-17: 培训结束
参考地址 https://www.elastic.co/cn/
# 部署篇
# 部署之Filebeat篇
Filebeat.yml 配置介绍
# ============================== Filebeat inputs ===============================
name: beat212 ## 服务标识
filebeat.inputs: ## input 设置
- type: log
  enabled: true
  paths:
    - /data/app/Lowaniot/log/lowaniotserver-info.log
    - /data/app/Lowaniotaccessor/log/P022-info.log
    - /data/app/P005/log/P005-info.log
  fields:
    filetype: javalog ## 添加字段、输出文档时携带此字段,在后续处理中利于分别处理
  fields_under_root: true
- type: log
  enabled: true
  paths:
    - /data/app/Lowaniotshadows/log/info/shadows-info.log
    - /data/app/Lowaniotshadows/log/error/shadows-error.log
    - /data/app/Lowaniotshadows/log/debug/shadows-debug.log
    - /data/app/Lowaniotbridge/log/info/bridge-info.log
    - /data/app/Lowaniotbridge/log/error/bridge-error.log
    - /data/app/Lowaniotbridge/log/debug/bridge-debug.log
    - /data/app/Lowaniotns/log/info/ns-info.log
    - /data/app/Lowaniotns/log/error/ns-error.log
    - /data/app/Lowaniotns/log/debug/ns-debug.log
  fields:
    filetype: golog
  fields_under_root: true
# ============================== Filebeat modules ==============================
filebeat.config.modules: ## filebeat 加载配置文件设置
  path: ${path.config}/modules.d/*.yml
  reload.enabled: false
# ------------------------------ Logstash Output -------------------------------
output.logstash: ## 将收集到的日志文档输出到logstash中
  # The Logstash hosts
  hosts: ["ip:5045","ip:5045"] ## logstash 集群地址
  loadbalance: true ## logstash集群是无状态的,需手动开启负载均衡
# ================================= Processors =================================
processors:
  - add_host_metadata:
      when.not.contains.tags: forwarded
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
# ============================= X-Pack Monitoring ==============================
monitoring.enabled: true ## 打开X-pack监控,将状态信息发送到elasticsearch中,后续可在kibana中监控filebeat状态
monitoring.elasticsearch:
  hosts: ["http://ip:9200","http://ip:9200","http://ip:9200"] ## elasticsearch 集群地址
  username: elastic
  password: password
Filebeat启动实例:
./bin/filebeat -e -c ./bin/filebeat.yml
# 部署之Logstash篇
Logstash.yml 配置介绍
http.host: "0.0.0.0"
node.name: "logstash212" ## 服务标识
pipeline.workers: 8 ## 工作通道 与服务器核心数保持一致
pipeline.batch.size: 2000 ## 通道批处理大小,可经过调试大小,找到服务器最合适值
pipeline.batch.delay: 10
pipeline.ordered: auto
xpack.monitoring.enabled: true ## 打开X-pack监控,将状态信息发送到elasticsearch中,后续可在kibana中监控logstash状态
xpack.monitoring.elasticsearch.username: elastic
xpack.monitoring.elasticsearch.password: password
xpack.monitoring.elasticsearch.hosts: ["http://ip:9200","http://ip:9200","http://ip:9200"]
jvm.options 配置
Logstash启动实例
./bin/logstash -f ./config/custom.conf
# 部署之Elasticsearch篇
# 下载地址
[root@node23 elastic]# wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.15.1-linux-x86_64.tar.gz
[root@node23 elastic]# wget https://artifacts.elastic.co/downloads/kibana/kibana-7.15.1-linux-x86_64.tar.gz
[root@node23 elastic]# wget https://artifacts.elastic.co/downloads/logstash/logstash-7.15.1-linux-x86_64.tar.gz
# 解压文件
# 创建用户
[root@node23 elastic]# useradd elastic
# 文件夹授权
[root@node23 elastic]# chown -R elastic:elastic /data/tools/elastic/
# 修改系统配置文件
[root@node23 elastic]# vim /etc/security/limits.conf
* soft nofile 65536
* hard nofile 65536
[root@node23 elastic]# vim /etc/sysctl.conf
# elastic setting
vm.max_map_count=262144
[root@node23 elastic]# sysctl -p
vm.max_map_count = 262144
elasticsearch.yml 配置介绍
# ======================== Elasticsearch Configuration =========================
# ---------------------------------- Cluster -----------------------------------
cluster.name: myelk
#
# ------------------------------------ Node ------------------------------------
node.name: node-157
node.data: true ## 数据节点
node.master: true ## 主节点
node.ingest: true ## 预处理(ingest)节点,可执行 ingest pipeline
# ----------------------------------- Paths ------------------------------------
path.data: /data/tools/elastic/elasticsearch-7.15.1/data
path.logs: /data/tools/elastic/elasticsearch-7.15.1/logs
#
# ---------------------------------- Network -----------------------------------
# 允许所有ip可以连接elasticsearch
network.host: 0.0.0.0
http.port: 9200
# --------------------------------- Discovery ----------------------------------
discovery.seed_hosts: ["192.168.20.155","192.168.20.156","192.168.20.157"] ## 单播模式,只需要填写ip即可(7.x 推荐写法,替代已弃用的 discovery.zen.ping.unicast.hosts)
cluster.initial_master_nodes: ["node-155", "node-156","node-157"]
# --------------------------------- Discovery 可选----------------------------------
discovery.zen.ping_timeout: 120s ## 心跳超时时间
discovery.zen.fd.ping_interval: 120s ## 节点检测时间
discovery.zen.fd.ping_timeout: 120s ## ping 超时时间
discovery.zen.fd.ping_retries: 6 ## 心跳重试次数
discovery.zen.minimum_master_nodes: 2 ## 最小可见主节点数,防止脑裂(7.x 起集群自动管理,此项已弃用、会被忽略)
#----------------------------------- Custom 可选-----------------------------------
thread_pool.write.queue_size: 10000 ## 加大线程写入队列
thread_pool.write.size: 9 ## 服务器核心数+1
indices.memory.index_buffer_size: 30%
# 跨域问题
http.cors.enabled: true
http.cors.allow-origin: "*"
Elasticsearch启动实例
# 切换用户
[root@node23 elastic]# su - elastic
# 后台运行
[root@node23 elastic]# ./bin/elasticsearch -d
安全认证设置
# 生成证书 cd到bin目录下
[root@node23 bin]# ./elasticsearch-certutil cert -out /data/tools/elastic/elasticsearch-7.15.1/config/elastic-certificates.p12 -pass ""
# 将该文件复制到集群其他节点的config目录下
[root@node23 config]# scp elastic-certificates.p12 root@192.168.20.155:/data/tools/elastic/elasticsearch-7.15.1/config
[root@node23 config]# scp elastic-certificates.p12 root@192.168.20.156:/data/tools/elastic/elasticsearch-7.15.1/config
配置elasticsearch.yml文件(所有的集群节点都要配置)
# 认证问题
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12 # 文件地址
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
重启Elasticsearch集群
设置访问密码
# cd到elasticsearch目录下的bin目录执行
# 手动设置访问密码
[root@node23 bin]# ./elasticsearch-setup-passwords interactive
# 自动设置访问密码--建议选择
[root@node23 bin]# ./elasticsearch-setup-passwords auto
# 执行完毕出现密码,复制收藏
Elasticsearch数据备份
# 安装node.js
[root@node23 tools]# wget https://nodejs.org/dist/v14.17.4/node-v14.17.4-linux-x64.tar.xz
# 解压安装包
[root@node23 tools]# tar -xf node-v14.17.4-linux-x64.tar.xz
# 配置环境变量
[root@node23 node-v14.17.4]# vim /etc/profile
export NODEJS_HOME=/data/tools/node-v14.17.4
export PATH=$NODEJS_HOME/bin:$PATH
# 确认环境变量
[root@node23 node-v14.17.4]# source /etc/profile
[root@node23 node-v14.17.4]# node -v
v14.17.4
[root@node23 node-v14.17.4]# npm -v
6.14.14
# 安装
[root@node23 tools]# npm install elasticdump
# 进入目录
[root@node23 tools]# cd /data/tools/node_modules/elasticdump/bin
# 备份全部 --- 速度过慢 不建议.
[root@node23 bin]# ./elasticdump --input=http://user:password@175.178.39.22:9200 --output=http://user:password@192.168.20.157:9200
# 部署之Kibana篇
# 下载地址
[root@node23 elastic]# wget https://artifacts.elastic.co/downloads/kibana/kibana-7.15.1-linux-x86_64.tar.gz
# 文件夹授权
[root@node23 elastic]# chown -R elastic:elastic /data/tools/elastic/
Kibana.yml 配置介绍
server.host: "0.0.0.0"
elasticsearch.hosts: ["http://192.168.20.155:9200","http://192.168.20.156:9200","http://192.168.20.157:9200"]
elasticsearch.username: "elastic"
elasticsearch.password: "password"
i18n.locale: "zh-CN"
Kibana启动实例(Kibana 默认禁止以 root 用户运行,需先切换到 elastic 用户)
[root@node23 kibana]# su - elastic
[elastic@node23 kibana]$ nohup ./bin/kibana &