Deploying ELK with Docker

Reference

https://blog.csdn.net/yprufeng?type=blog

Summary

Increase the kernel vm.max_map_count

Note: Elasticsearch needs vm.max_map_count to be at least 262144, while the Linux default is far lower (typically 65530), so the ES cluster will not start until this kernel parameter is raised.
Commands:
echo vm.max_map_count=262144 >> /etc/sysctl.conf
sysctl -p

Increase the docker-compose startup timeout

Note: when docker-compose starts many containers at once, they may not all come up within the default 60 s timeout, in which case the whole startup fails. Here we raise the timeout to 1000 s.
Edit the system profile /etc/profile with vi, append the two variables below to the end of the file, and then reload it so the new settings take effect.
Variables:
export DOCKER_CLIENT_TIMEOUT=1000
export COMPOSE_HTTP_TIMEOUT=1000
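A minimal sketch of persisting and applying these settings in one step (assuming a Bash login shell); the same source step is used again in the Kafka section later:

echo 'export DOCKER_CLIENT_TIMEOUT=1000' >> /etc/profile
echo 'export COMPOSE_HTTP_TIMEOUT=1000' >> /etc/profile
source /etc/profile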

Create the elk-cluster.yml configuration file

#elk-cluster compose configuration file
version: "3.3"

services:
  #######################elasticsearch cluster configuration################
  es01:
    #image name
    image: elasticsearch:7.12.0
    #container name
    container_name: elk-cluster-es01
    hostname: es01
    #restart automatically on boot
    restart: always
    privileged: true
    #environment variables
    environment:
      #node name
      - node.name=es01
      #cluster name
      - cluster.name=elk-cluster-es
      #other nodes
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      #enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      #JVM heap size
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      #take ownership of mounted files (read/write access)
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    #volume mappings
    volumes:
      - /elk/elasticsearch/01/data:/usr/share/elasticsearch/data
      - /elk/elasticsearch/01/logs:/usr/share/elasticsearch/logs
    #port mappings
    ports:
      - 9200:9200
    #network configuration
    networks:
      - elk
      
  es02:
    image: elasticsearch:7.12.0
    container_name: elk-cluster-es02
    hostname: es02
    restart: always
    privileged: true
    environment:
      - node.name=es02
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      #enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elk/elasticsearch/02/data:/usr/share/elasticsearch/data
      - /elk/elasticsearch/02/logs:/usr/share/elasticsearch/logs
    #network configuration
    networks:
      - elk
      
  es03:
    image: elasticsearch:7.12.0
    container_name: elk-cluster-es03
    hostname: es03
    restart: always
    privileged: true
    environment:
      - node.name=es03
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      #enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elk/elasticsearch/03/data:/usr/share/elasticsearch/data
      - /elk/elasticsearch/03/logs:/usr/share/elasticsearch/logs
    #network configuration
    networks:
      - elk
      
  #####################kibana configuration####################################
  kibana:
    image: kibana:7.12.0
    container_name: elk-cluster-kibana
    hostname: kibana
    restart: always
    environment:
      #Elasticsearch address
      ELASTICSEARCH_HOSTS: "http://es01:9200"
      #Kibana locale: en, zh-CN, ja-JP
      I18N_LOCALE: "en"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    #port mappings
    ports:
      - 5601:5601
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
      
  #####################nginx configuration####################################
  nginx:
    image: nginx:stable-alpine-perl
    container_name: elk-cluster-nginx
    hostname: nginx
    restart: always
    ulimits:
      memlock:
        soft: -1
        hard: -1
    #port mappings
    ports:
      - 80:80
    networks:
      - elk
    depends_on:
      - kibana
      
  #####################logstash configuration####################################
  logstash01:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash01
    hostname: logstash01
    restart: always
    environment:
      #Elasticsearch address
      - monitoring.elasticsearch.hosts="http://es01:9200"
    ports:
      - 9600:9600
      - 5044:5044
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
      
  logstash02:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash02
    hostname: logstash02
    restart: always
    environment:
      #Elasticsearch address
      - monitoring.elasticsearch.hosts="http://es01:9200"
    ports:
      - 9601:9600
      - 5045:5044
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
      
  logstash03:
    image: logstash:7.12.0
    container_name: elk-cluster-logstash03
    hostname: logstash03
    restart: always
    environment:
      #Elasticsearch address
      - monitoring.elasticsearch.hosts="http://es01:9200"
    ports:
      - 9602:9600
      - 5046:5044
    networks:
      - elk
    depends_on:
      - es01
      - es02
      - es03
      
  #####################kafka cluster configuration####################################
  #zookeeper cluster
  zk01:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk01
    hostname: zk01
    ports:
      - 2181:2181
    networks:
      - elk
    volumes:
      - "/elk/zookeeper/zk01/data:/data"
      - "/elk/zookeeper/zk01/logs:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zk02:2888:3888;2181 server.3=zk03:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
 
  zk02:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk02
    hostname: zk02
    ports:
      - 2182:2181
    networks:
      - elk
    volumes:
      - "/elk/zookeeper/zk02/data:/data"
      - "/elk/zookeeper/zk02/logs:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zk01:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zk03:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
 
  zk03:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk03
    hostname: zk03
    ports:
      - 2183:2181
    networks:
      - elk
    volumes:
      - "/elk/zookeeper/zk03/data:/data"
      - "/elk/zookeeper/zk03/logs:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zk01:2888:3888;2181 server.2=zk02:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
    depends_on:
      - es01
      - es02
      - es03
 
  #kafka cluster
  kafka01:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka01
    hostname: kafka01
    ports:
      - "9091:9092"
      - "9991:9991"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: kafka01
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_HOST_NAME: kafka01
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka01:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.120:9091
      JMX_PORT: 9991
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka01 -Dcom.sun.management.jmxremote.port=9991 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/elk/kafka/kafka01/:/kafka"
 
  kafka02:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka02
    hostname: kafka02
    ports:
      - "9092:9092"
      - "9992:9992"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: kafka02
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_HOST_NAME: kafka02
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka02:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.120:9092
      JMX_PORT: 9992
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka02 -Dcom.sun.management.jmxremote.port=9992 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/elk/kafka/kafka02/:/kafka"
 
  kafka03:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka03
    hostname: kafka03
    ports:
      - "9093:9092"
      - "9993:9993"
    networks:
      - elk
    depends_on:
      - zk01
      - zk02
      - zk03
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: kafka03
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_HOST_NAME: kafka03
      KAFKA_ZOOKEEPER_CONNECT: zk01:2181,zk02:2181,zk03:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka03:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.120:9093
      JMX_PORT: 9993
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka03 -Dcom.sun.management.jmxremote.port=9993 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/elk/kafka/kafka3/:/kafka"
 
  #kafka management tool
  'kafka-manager':
    container_name: elk-cluster-kafka-manager
    image: sheepkiller/kafka-manager:stable
    restart: always
    ports:
      - 9000:9000
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
    environment:
      KM_VERSION: 1.3.3.18
      ZK_HOSTS: zk01:2181,zk02:2181,zk03:2181
 
  #kafka monitoring tool
  'kafka-offset-monitor':
    container_name: elk-cluster-kafka-offset-monitor
    image: 564239555/kafkaoffsetmonitor:latest
    restart: always
    volumes:
      - /elk/kafkaoffsetmonitor/conf:/kafkaoffsetmonitor
    ports:
      - 9001:8080
    networks:
      - elk
    depends_on:
      - kafka01
      - kafka02
      - kafka03
    environment:
      ZK_HOSTS: zk01:2181,zk02:2181,zk03:2181
      KAFKA_BROKERS: kafka01:9092,kafka02:9092,kafka03:9092
      REFRESH_SECENDS: 10
      RETAIN_DAYS: 2
      
  #######################filebeat configuration################
  filebeat:
    #image name
    image: elastic/filebeat:7.12.0
    #container name
    container_name: elk-cluster-filebeat
    hostname: filebeat
    #restart automatically on boot
    restart: always
    volumes:
      - /elk/filebeat/data:/elk/logs
    #permission settings
    privileged: true
    #run as this user
    user: root
    #environment variables
    environment:
      #take ownership of mounted files (read/write access)
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    #network configuration
    networks:
      - elk 
    depends_on:
      - kafka01
      - kafka02
      - kafka03
      
networks:
  elk:
    driver: bridge

Start the ELK cluster

Command: docker-compose -f elk-cluster.yml up -d
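A quick sanity check once the stack is up; this assumes the commands are run on the Docker host with the port mappings defined above:

docker-compose -f elk-cluster.yml ps
curl http://localhost:9200/_cluster/health?pretty
curl -I http://localhost:5601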

Modify the Filebeat configuration file

Commands:

docker exec -it elk-cluster-filebeat bash
vi filebeat.yml
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /elk/logs/*.log  #input file path for collection; application logs should later be mapped into this directory
 
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
 
processors:
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
 
output.kafka:
  # initial brokers for reading cluster metadata
  hosts: ["kafka01:9092", "kafka02:9092", "kafka03:9092"] #kafka集群地址
 
  # message topic selection + partitioning
  topic: 'test' # Kafka topic name
  partition.round_robin:
    reachable_only: false
 
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000

Create the Kafka topic test
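A minimal sketch of creating the topic, assuming the wurstmeister/kafka image ships the Kafka CLI under /opt/kafka/bin and that three partitions with replication factor 3 are acceptable defaults:

docker exec -it elk-cluster-kafka01 /opt/kafka/bin/kafka-topics.sh --create --bootstrap-server kafka01:9092 --replication-factor 3 --partitions 3 --topic test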

Modify the Logstash configuration

input{
       #option 1: receive logs directly from logback over TCP
       tcp{
           port => 5044
           type => "samplelog"
           codec => "json_lines"
       }
       #option 2: consume from Kafka
       kafka {
           type => 'kafka'
           bootstrap_servers => "kafka01:9092,kafka02:9092,kafka03:9092"
           topics => "test"
           group_id => "elk"
    }
}
 
output{
    #plain (TCP) events
    if [type] == 'samplelog'{
        elasticsearch {
            #ES addresses
            hosts => ["es01:9200","es02:9200","es03:9200"]
            #index name
            index => "elk-cluster-logstash-01-%{[app]}-%{+YYYY.MM.dd}"
        }
    }
    #events from Kafka
    if [type] == 'kafka'{
        elasticsearch {
            #ES addresses
            hosts => ["es01:9200","es02:9200","es03:9200"]
            #index name
            index => "elk-samplelog-%{+YYYY.MM.dd}"
        }
    }
 
}

Modify the Nginx configuration

server {
        listen 80;
        server_name kibana;
        location / {
                proxy_pass http://kibana:5601;
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection 'upgrade';
                proxy_set_header Host $host;
                proxy_cache_bypass $http_upgrade;
        }
}
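The compose file above does not bind-mount an Nginx configuration directory, so one way to apply this server block is to copy it into the container and validate it before the restart below; a sketch assuming the block is saved locally as kibana.conf (a file name chosen here for illustration):

docker cp kibana.conf elk-cluster-nginx:/etc/nginx/conf.d/kibana.conf
docker exec elk-cluster-nginx nginx -t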

Restart the services

Command: docker-compose -f elk-cluster.yml restart

ES cluster deployment

  • Commands:
    docker pull elasticsearch:7.11.1
    Increase the Linux vm.max_map_count: the ES cluster needs at least 262144 memory map areas per process, while the system default is much lower, so raise this value so that the ES cluster can start normally.
    Command: echo vm.max_map_count=262144 >> /etc/sysctl.conf
    sysctl -p

The docker-compose startup file es-cluster.yml is as follows:

version: '2.2'
services:
  #service name
  es01:
    #image name
    image: elasticsearch:7.11.1
    #container name
    container_name: elk-cluster-es-master
    privileged: true
    #environment variables
    environment:
      #node name
      - node.name=es01
      #whether this node is master-eligible
      - node.master=true
      #cluster name
      - cluster.name=elk-cluster-es
      #other nodes
      - discovery.seed_hosts=es02,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      #enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      #JVM heap size
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      #take ownership of mounted files (read/write access)
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    #volume mappings
    volumes:
      - /elk/es/master/data:/usr/share/elasticsearch/data
      - /elk/es/master/logs:/usr/share/elasticsearch/logs
    #port mappings
    ports:
      - 9200:9200
    #network configuration
    networks:
      - elastic
  es02:
    image: elasticsearch:7.11.1
    container_name: elk-cluster-es-slave-01
    privileged: true
    environment:
      - node.name=es02
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es03
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      #enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elk/es/slave01/data:/usr/share/elasticsearch/data
      - /elk/es/slave01/logs:/usr/share/elasticsearch/logs
    networks:
      - elastic
  es03:
    image: elasticsearch:7.11.1
    container_name: elk-cluster-es-slave-02
    privileged: true
    environment:
      - node.name=es03
      - cluster.name=elk-cluster-es
      - discovery.seed_hosts=es01,es02
      - cluster.initial_master_nodes=es01,es02,es03
      - bootstrap.memory_lock=true
      #enable CORS
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
      - "TAKE_FILE_OWNERSHIP=true"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - /elk/es/slave02/data:/usr/share/elasticsearch/data
      - /elk/es/slave02/logs:/usr/share/elasticsearch/logs
    networks:
      - elastic
 
volumes:
  data01:
    driver: local
  data02:
    driver: local
  data03:
    driver: local
 
networks:
  elastic:
    driver: bridge

docker-compose -f es-cluster.yml up -d
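Once the containers are up, the cluster can be checked from the host; a sketch assuming the host IP 192.168.1.120 used elsewhere in this article:

curl http://192.168.1.120:9200/_cat/nodes?v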

Install the es-head plugin to manage the ES cluster visually

  • Commands:
    docker pull mobz/elasticsearch-head:5
    docker run -d --name elk-cluster-es-head -p 9100:9100 mobz/elasticsearch-head:5

Kibana setup

  • Commands:
    docker pull kibana:7.11.1
    docker run --name elk-cluster-kibana --restart=always -p 5601:5601 -d kibana:7.11.1
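Note that this run command does not tell Kibana where Elasticsearch is (the image defaults to http://elasticsearch:9200, which will not resolve here); a hedged variant that passes the address explicitly, assuming ES is reachable at 192.168.1.120:9200:

docker run --name elk-cluster-kibana --restart=always -p 5601:5601 -e ELASTICSEARCH_HOSTS=http://192.168.1.120:9200 -d kibana:7.11.1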

Nginx reverse proxy setup

Pull the image

  • Command:
    docker pull nginx:stable-alpine-perl

Start the Nginx service and set it to restart automatically

  • Command: docker run --name elk-cluster-nginx --restart=always -p 80:80 -v /elk/nginx:/kibana -d nginx:stable-alpine-perl

Add the Kibana proxy
The kibana.conf configuration file is as follows:

server {
        listen 80;
        server_name kibana;
        auth_basic "Restricted Access";
        auth_basic_user_file /kibana/kibana-user;
        location / {
                proxy_pass http://192.168.1.120:5601;
                proxy_http_version 1.1;
                proxy_set_header Upgrade $http_upgrade;
                proxy_set_header Connection 'upgrade';
                proxy_set_header Host $host;
                proxy_cache_bypass $http_upgrade;
        }
}

listen: the port to listen on
server_name: the server name
auth_basic_user_file: the basic-auth password file; it does not exist yet, and we will generate a kibana-user file later in /elk/nginx, the host directory mounted in the run command above
proxy_pass: the upstream Kibana address

Install httpd-tools to generate the basic-auth password file

  • Commands:
    yum -y install epel-release
    yum -y install nginx httpd-tools
    Generate the password file kibana-user:
    htpasswd -cm /elk/nginx/kibana-user kibana
    Enter the password when prompted

Restart the Nginx container

  • Command:
    docker restart elk-cluster-nginx

Logstash setup

-- Logstash is a stateless stream processor and has no native clustering scheme of its own

Pull the image

  • Command:
    docker pull logstash:7.11.1

The docker-compose startup file logstash-cluster.yml is as follows:

#Logstash configuration for the ELK cluster
version: '3'
services:
 logstash-01:
   image: logstash:7.11.1
   container_name: elk-cluster-logstash-01
   hostname: logstash
   restart: always
   ports:
     - 9600:9600
     - 5044:5044
 logstash-02:
   image: logstash:7.11.1
   container_name: elk-cluster-logstash-02
   hostname: logstash
   restart: always
   ports:
     - 9601:9600
     - 5045:5044
 logstash-03:
   image: logstash:7.11.1
   container_name: elk-cluster-logstash-03
   hostname: logstash
   restart: always
   ports:
     - 9602:9600
     - 5046:5044

Start the Logstash cluster with docker-compose

  • Commands:
    docker-compose -f logstash-cluster.yml -p logstash up -d
    -- Note: since we already started the ES cluster with docker-compose, we add the -p flag to give this project its own name; without it the projects share the same default project directory and docker-compose prints a warning. It also works without -p, but we add it here. (A quick verification sketch follows.)
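A quick check that all three instances answer on their mapped node-info ports (9600-9602, as configured above); a sketch assuming the host IP 192.168.1.120 and the image's default http.host of 0.0.0.0:

curl http://192.168.1.120:9600/?pretty
curl http://192.168.1.120:9601/?pretty
curl http://192.168.1.120:9602/?pretty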

Modify the default Logstash cluster configuration:

Enter the container and edit logstash.yml with vi.
Command: docker exec -it elk-cluster-logstash-01 /bin/bash

ls
cd config
ls
vi logstash.yml
xpack.monitoring.elasticsearch.hosts: ["http://192.168.1.120:9200"]  # change this to your ES cluster address

Enter the container and edit logstash.conf with vi.
Command: docker exec -it elk-cluster-logstash-01 /bin/bash

ls
cd pipeline/
ls
vi logstash.conf
---------------------------------
input{
       #option 1: receive logs directly from logback over TCP
       tcp{
           port => 5044
           type => "samplelog"
           codec => "json_lines"
       }
       #option 2: Kafka (configured later)
}
 
output{
    if [type] == 'samplelog'{
        elasticsearch {
            #ES address
            hosts => "192.168.1.120:9200"
            #index name
            index => "elk-cluster-logstash-01-%{[app]}-%{+YYYY.MM.dd}"
        }
    }
    #Kafka events (configured later)
}
-------------------------------

--- Note: this is only the change for elk-cluster-logstash-01; the other two containers are modified in the same way. The only difference is the index name in the output section, which should be distinguished with 01, 02 and 03; everything else is identical.

Restart the Logstash cluster

Command: docker-compose -f logstash-cluster.yml -p logstash restart

Verify that Logstash can collect data

(1) Check the data with es-head
(2) Check the data with Kibana (a quick way to generate a test event is shown below)
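A minimal way to push a test event into the TCP input on port 5044 (json_lines codec, type samplelog as configured above); assumes nc is installed on the host and the host IP is 192.168.1.120:

echo '{"app":"demo","message":"hello elk"}' | nc 192.168.1.120 5044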

Kafka cluster setup

--- A Kafka cluster relies on ZooKeeper for coordination, so we also need to build a ZooKeeper cluster. In this section we again use docker-compose to build the Kafka cluster, and to make it easier to manage and monitor we also install two helper tools, kafka-manager and kafka-offset-monitor. Without further ado, let's get to it.

Pull the images:

Command: docker pull <image>:<tag>
zookeeper:3.7.0
wurstmeister/kafka:2.13-2.7.0
sheepkiller/kafka-manager:stable
564239555/kafkaoffsetmonitor:latest

docker-compose container startup timeout
When docker-compose starts many containers at once, they may not all come up within the default 60 s timeout and the whole startup fails, so we raise the timeout to 1000 s. Edit the system profile /etc/profile with vi and append the two variables below to the end of the file.
export DOCKER_CLIENT_TIMEOUT=1000
export COMPOSE_HTTP_TIMEOUT=1000

Then reload the file so the new settings take effect:
source /etc/profile

Create the kafka-cluster.yml startup file

#kafka-cluster.yml file
version: '3.1'
 
services:
  #zookeeper cluster
  zk1:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk1
    hostname: zk1
    ports:
      - 2181:2181
    volumes:
      - "/elk/zookeeper/zk1/data:/data"
      - "/elk/zookeeper/zk1/logs:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=zk2:2888:3888;2181 server.3=zk3:2888:3888;2181  
  zk2:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk2
    hostname: zk2
    ports:
      - 2182:2181
    volumes:
      - "/elk/zookeeper/zk2/data:/data"
      - "/elk/zookeeper/zk2/logs:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zk1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zk3:2888:3888;2181
  zk3:
    image: zookeeper:3.7.0
    restart: always
    container_name: elk-cluster-zk3
    hostname: zk3
    ports:
      - 2183:2181
    volumes:
      - "/elk/zookeeper/zk3/data:/data"
      - "/elk/zookeeper/zk3/logs:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zk1:2888:3888;2181 server.2=zk2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
  #kafka cluster
  kafka1:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka1
    hostname: kafka1
    ports:
      - "9091:9092"
      - "9991:9991"
    depends_on:
      - zk1
      - zk2
      - zk3
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ADVERTISED_HOST_NAME: kafka1
      KAFKA_ADVERTISED_PORT: 9091
      KAFKA_HOST_NAME: kafka1
      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka1:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.120:9091
      JMX_PORT: 9991
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka1 -Dcom.sun.management.jmxremote.port=9991 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/elk/kafka/kafka1/:/kafka"
  kafka2:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka2
    hostname: kafka2
    ports:
      - "9092:9092"
      - "9992:9992"
    depends_on:
      - zk1
      - zk2
      - zk3
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_ADVERTISED_HOST_NAME: kafka2
      KAFKA_ADVERTISED_PORT: 9092
      KAFKA_HOST_NAME: kafka2
      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka2:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.120:9092
      JMX_PORT: 9992
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka2 -Dcom.sun.management.jmxremote.port=9992 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/elk/kafka/kafka2/:/kafka"
  kafka3:
    image: wurstmeister/kafka:2.13-2.7.0
    restart: always
    container_name: elk-cluster-kafka3
    hostname: kafka3
    ports:
      - "9093:9092"
      - "9993:9993"
    depends_on:
      - zk1
      - zk2
      - zk3
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_ADVERTISED_HOST_NAME: kafka3
      KAFKA_ADVERTISED_PORT: 9093
      KAFKA_HOST_NAME: kafka3
      KAFKA_ZOOKEEPER_CONNECT: zk1:2181,zk2:2181,zk3:2181
      KAFKA_LISTENERS: PLAINTEXT://kafka3:9092
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.1.120:9093
      JMX_PORT: 9993
      KAFKA_JMX_OPTS: "-Djava.rmi.server.hostname=kafka3 -Dcom.sun.management.jmxremote.port=9993 -Dcom.sun.management.jmxremote=true -Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
    volumes:
      - "/elk/kafka/kafka3/:/kafka"
  #kafka management tool
  'kafka-manager':
    container_name: kafka-manager
    image: sheepkiller/kafka-manager:stable
    restart: always
    ports:
      - 9000:9000
    depends_on:
      - kafka1
      - kafka2
      - kafka3
    environment:
      KM_VERSION: 1.3.3.18
      ZK_HOSTS: zk1:2181,zk2:2181,zk3:2181
  #kafka monitoring tool
  'kafka-offset-monitor':
    container_name: kafka-offset-monitor
    image: 564239555/kafkaoffsetmonitor:latest
    restart: always
    volumes:
      - /elk/kafkaoffsetmonitor/conf:/kafkaoffsetmonitor
    ports:
      - 9001:8080
    depends_on:
      - kafka1
      - kafka2
      - kafka3
    environment:
      ZK_HOSTS: zk1:2181,zk2:2181,zk3:2181
      KAFKA_BROKERS: kafka1:9092,kafka2:9092,kafka3:9092
      REFRESH_SECENDS: 10
      RETAIN_DAYS: 2

Start the Kafka cluster

Command: docker-compose -f kafka-cluster.yml -p kafka up -d
Notes:
(1) -f: specifies the compose file to start
(2) -p: specifies the project name; when several docker-compose projects run on the same host, omitting it causes a project-directory conflict warning
(3) -d: run in the background

Verify the Kafka cluster

Open the kafka-manager tool at http://192.168.1.120:9000
Open the kafka-offset-monitor tool at http://192.168.1.120:9001/ for basic monitoring of the Kafka cluster.
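The Filebeat and Logstash configurations later in this article publish to and consume from a topic named samplelog, so it can be created now; a minimal sketch, assuming the wurstmeister image ships the Kafka CLI under /opt/kafka/bin:

docker exec -it elk-cluster-kafka1 /opt/kafka/bin/kafka-topics.sh --create --bootstrap-server kafka1:9092 --replication-factor 3 --partitions 3 --topic samplelog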

Filebeat service setup

--- Filebeat is a lightweight log shipper

Pull the image

Command: docker pull elastic/filebeat:7.11.1

Start a temporary Filebeat container

Note: editing the Filebeat configuration requires the right file permissions, so we cannot modify it directly inside the container. Instead we start a temporary Filebeat container, copy its configuration to the host, adjust permissions and edit the file there, and then mount it back into a new container.
Command: docker run -d --name=elk-cluster-filebeat elastic/filebeat:7.11.1
Copy the configuration out of the Filebeat container:
(1) Copy the configuration: docker cp elk-cluster-filebeat:/usr/share/filebeat /elk/
(2) Grant permissions: chmod 777 -R /elk/
(3) Fix the ownership and permissions of the filebeat.yml configuration file
(4) Create a directory for the log files to be collected, matching the input path configured below (a command sketch for steps (3) and (4) follows)
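A minimal sketch of steps (3) and (4), assuming the /elk/filebeat:/usr/share/filebeat bind mount used later and the samplelog input path configured below:

chown root:root /elk/filebeat/filebeat.yml
chmod 644 /elk/filebeat/filebeat.yml
mkdir -p /elk/filebeat/samplelog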

Edit filebeat.yml with vi

--- Here we only need to configure the path of the files to collect (/usr/share/filebeat/samplelog/*.log), the Kafka cluster addresses (hosts: ["192.168.1.120:9091", "192.168.1.120:9092", "192.168.1.120:9093"]), and the Kafka topic to publish to (samplelog).
The topic was already created in the Kafka cluster section, so we simply use it here.

#input configuration
filebeat.inputs:
- type: log
  enabled: true
  paths:
    - /usr/share/filebeat/samplelog/*.log
 
filebeat.config:
  modules:
    path: ${path.config}/modules.d/*.yml
    reload.enabled: false
 
processors:
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
 
#output to Kafka
output.kafka:
  # initial brokers for reading cluster metadata
  hosts: ["192.168.1.120:9091", "192.168.1.120:9092", "192.168.1.120:9093"]
 
  # message topic selection + partitioning
  topic: 'samplelog'
  partition.round_robin:
    reachable_only: false
 
  required_acks: 1
  compression: gzip
  max_message_bytes: 1000000

Remove the temporary Filebeat container

Command: docker rm -f elk-cluster-filebeat

Start a new Filebeat container

Command: docker run -d --name=elk-cluster-filebeat --restart=always --user=root --privileged=true -v /elk/filebeat:/usr/share/filebeat elastic/filebeat:7.11.1

Check the startup logs to confirm that the Filebeat service started correctly

Command: docker logs elk-cluster-filebeat

ELK cluster verification

Modify the Logstash configuration so that it consumes messages from the Kafka topic, then restart the Logstash services.
We only show the change for the elk-cluster-logstash-01 container; the other containers are modified in the same way and are not repeated here.

Modify the Logstash configuration

input{
       #option 1: receive logs directly from logback over TCP
       tcp{
           port => 5044
           type => "samplelog"
           codec => "json_lines"
       }
       #option 2: consume from Kafka
       kafka {
           type => 'kafka'
           bootstrap_servers => "192.168.1.120:9091,192.168.1.120:9092,192.168.1.120:9093"
           topics => "samplelog"
           group_id => "elk"
    }
}
 
output{
    if [type] == 'samplelog'{
        elasticsearch {
            #ES address
            hosts => "192.168.1.120:9200"
            #index name
            index => "elk-cluster-logstash-01-%{[app]}-%{+YYYY.MM.dd}"
        }
    }
    #events from Kafka
    if [type] == 'kafka'{
        elasticsearch {
            #ES address
            hosts => "192.168.1.120:9200"
            #index name
            index => "elk-samplelog-%{+YYYY.MM.dd}"
        }
    }
}

Restart the Logstash cluster services

docker-compose -f logstash-cluster.yml -p logstash restart
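A minimal end-to-end check, assuming the bind-mounted log directory /elk/filebeat/samplelog and the host IP 192.168.1.120 used above: append a line to a log file, give the pipeline a few seconds, then look for the elk-samplelog index in Elasticsearch.

echo '{"app":"demo","message":"hello elk"}' >> /elk/filebeat/samplelog/test.log
curl -s http://192.168.1.120:9200/_cat/indices?v | grep samplelog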

Source: https://www.cnblogs.com/ningyouyou/p/16363941.html
