Настраиваем отказоустойчивый ELK кластер, состоящий из трёх master-нод.
Конфигурация хостов:
kibana: 192.168.0.150
elasticsearch01: 192.168.0.151
elasticsearch02: 192.168.0.152
elasticsearch03: 192.168.0.153
nginx: 192.168.0.190
Версия ПО:
CentOS 8.2
Kibana: 7.10.1
ElasticSearch: 7.10.1
Filebeat: 7.10.1
1. Обновляем пакеты на всех хостах и устанавливаем нужные пакеты.
yum update
yum install wget -y
2. Скачиваем нужную версию elasticsearch и kibana
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-7.10.1-x86_64.rpm
wget https://artifacts.elastic.co/downloads/kibana/kibana-7.10.1-x86_64.rpm
3. Устанавливаем kibana
[root@kibana ~]# rpm -ivh kibana-7.10.1-x86_64.rpm
4. Устанавливаем elasticsearch на трёх хостах.
rpm -ivh elasticsearch-7.10.1-x86_64.rpm
5. Генерируем сертификат на хосте ElasticSearch01 для защищенного соединения между нодами elasticsearch
/usr/share/elasticsearch/bin/elasticsearch-certutil cert
В итоге появится файл /usr/share/elasticsearch/elastic-certificates.p12
6. Поправляем права на файл и перемещаем его в папку конфигурации Elasticsearch
chown root:elasticsearch elastic-certificates.p12
chmod 644 elastic-certificates.p12
cp /usr/share/elasticsearch/elastic-certificates.p12 /etc/elasticsearch
7. Копируем сертификат на остальные ноды elasticsearch
scp -r elastic-certificates.p12 root@192.168.0.152:/etc/elasticsearch
scp -r elastic-certificates.p12 root@192.168.0.153:/etc/elasticsearch
8. Генерируем пароли
/usr/share/elasticsearch/bin/elasticsearch-setup-passwords auto
9. Приводим конфиги для сервисов.
[root@es01 elasticsearch]#
cluster.name: localhost-cluster
node.name: Node-1
node.data: true
node.attr.rack: r1
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.0.151
http.port: 9200
http.max_content_length: 2000mb
discovery.seed_hosts: ["192.168.0.151", "192.168.0.152", "192.168.0.153"]
cluster.initial_master_nodes: ["192.168.0.151", "192.168.0.152", "192.168.0.153"]
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
[root@es02 elasticsearch]#
cluster.name: localhost-cluster
node.name: Node-2
node.data: true
node.attr.rack: r2
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.0.152
http.port: 9200
http.max_content_length: 2000mb
discovery.seed_hosts: ["192.168.0.151", "192.168.0.152","192.168.0.153"]
cluster.initial_master_nodes: ["192.168.0.151", "192.168.0.152", "192.168.0.153"]
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
[root@es03 elasticsearch]#
cluster.name: localhost-cluster
node.name: Node-3
node.data: true
node.attr.rack: r3
path.data: /var/lib/elasticsearch
path.logs: /var/log/elasticsearch
network.host: 192.168.0.153
http.port: 9200
http.max_content_length: 2000mb
discovery.seed_hosts: ["192.168.0.151", "192.168.0.152","192.168.0.153"]
cluster.initial_master_nodes: ["192.168.0.151", "192.168.0.152", "192.168.0.153"]
xpack.security.enabled: true
xpack.security.transport.ssl.enabled: true
xpack.security.transport.ssl.verification_mode: certificate
xpack.security.transport.ssl.keystore.path: elastic-certificates.p12
xpack.security.transport.ssl.truststore.path: elastic-certificates.p12
[root@kibana kibana]#
server.port: 5601
server.host: "192.168.0.150"
xpack.reporting.csv.maxSizeBytes: 2048576000
xpack.reporting.queue.timeout: 4920000
xpack.reporting.kibanaServer.protocol: http
xpack.reporting.kibanaServer.hostname: 192.168.0.150
xpack.security.enabled: true
#xpack.security.encryptionKey: tTsjKOS96toodtATD0Dp7XOmjAqtcCXT
server.name: "localhost-cluster"
elasticsearch.hosts: ["http://192.168.0.151:9200", "http://192.168.0.152:9200", "http://192.168.0.153:9200"]
logging.verbose: true
elasticsearch.username: "kibana"
elasticsearch.password: "your_generated_password"
10. На всех хостах службы включаем в автозагрузку и запускаем сервисы.
systemctl enable kibana && systemctl start kibana
systemctl enable elasticsearch && systemctl start elasticsearch
11. Проверяем кол-во нод в кластере, как видим все ОК.
curl -u elastic:your_generated_password -X GET "192.168.0.151:9200/_cluster/health?pretty"
{
"cluster_name" : "localhost-cluster",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 3,
"number_of_data_nodes" : 3,
"active_primary_shards" : 10,
"active_shards" : 20,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
12. Заходим в Web UI Kibana
13. Устанавливаем сборщик логов filebeat на сервере nginx
wget https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-7.10.1-x86_64.rpm
rpm -ivh filebeat-7.10.1-x86_64.rpm
service filebeat start
14. Конфиг filebeat в /etc/filebeat/filebeat.yml приводим в такой вид.
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/nginx/access.log
filebeat.config.modules:
path: ${path.config}/modules.d/*.yml
reload.enabled: false
setup.template.settings:
index.number_of_shards: 1
index.codec: best_compression
setup.template.name: "nginx-test"
setup.template.pattern: "nginx-test-*"
setup.ilm.enabled: false
#-------------------------- Elasticsearch output -------------------
output.elasticsearch:
# Array of hosts to connect to.
hosts: ["192.168.0.151:9200", "192.168.0.152:9200", "192.168.0.153:9200"]
# Protocol - either `http` (default) or `https`.
protocol: "http"
index: "nginx-test-%{+yyyy.MM.dd}"
username: "elastic"
password: "your_generated_password"
15. Проверяем отказоустойчивость, выключаем ноду ElasticSearch02 и запускаем curl.
--- 192.168.0.152 ping statistics ---
7 packets transmitted, 0 packets received, 100.0% packet loss
curl -u elastic:your_generated_password -X GET "192.168.0.151:9200/_cluster/health?pretty"
{
"cluster_name" : "localhost-cluster",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 2,
"number_of_data_nodes" : 2,
"active_primary_shards" : 10,
"active_shards" : 20,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}
16. Как видим из скрина, после падения ноды ElasticSearch02 на небольшой промежуток времени остановился сбор логов, затем сбор логов продолжился.
Комментариев нет:
Отправить комментарий