
docker-compose Deployment Templates



This article covers the docker-compose.yml files used to deploy the ELK stack.

Notes on working with docker-compose:

  1. Do not use a Docker container as a data store; always keep data outside the container via volumes.
  2. Do not hand your docker-compose files to others; they contain details about your servers.
  3. Manage the stack with docker-compose commands only; do not mix manual docker commands with docker-compose on the same containers.
  4. Write a script that automatically backs up the data directories mapped out of the containers (a minimal sketch follows this list).
  5. Do not cram every service into a single container.
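
For item 4, a minimal backup sketch is shown below; the source directories, backup location, and retention period are placeholders to adapt to your own volume layout:

#!/bin/bash
# Hypothetical backup script: archive the host directories that docker-compose
# maps into the containers. SRC_DIRS and BACKUP_DIR are assumptions; point them
# at your real volume directories (e.g. the ./data and ./config paths used below).
SRC_DIRS="/opt/elk/elasticsearch/data /opt/elk/logstash/config"
BACKUP_DIR="/data/docker-backup"
DATE=$(date +%Y%m%d)

mkdir -p "$BACKUP_DIR"
for dir in $SRC_DIRS; do
    name=$(basename "$dir")
    tar czf "$BACKUP_DIR/${name}-${DATE}.tar.gz" "$dir"
done

# keep only the last 7 days of archives
find "$BACKUP_DIR" -name "*.tar.gz" -mtime +7 -delete

Scheduling the script with cron (for example once a day) keeps the backups hands-off.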

 

 

Management node: 10.191.51.44

Data nodes: 10.191.51.45/46/47

Elasticsearch docker-compose.yml

version: "2"
services:
  elasticsearch:
    container_name: ES
    environment:
      - ES_JAVA_OPTS=-Xms4G -Xmx4G
    image: 10.191.51.5/elk/elasticsearch:6.5.4
    volumes:
      - ./config/elasticsearch.yml:/usr/share/elasticsearch/config/elasticsearch.yml
      - ./data:/usr/share/elasticsearch/data
    ports:
      - "9200:9200"
      - "9300:9300"

Management node elasticsearch.yml

cluster.name: elasticsearch-cluster
node.name: es-node1
network.bind_host: 0.0.0.0
network.publish_host: 10.191.51.44
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: false
node.data: false
node.ingest: true
discovery.zen.ping.unicast.hosts: ["10.191.51.44:9300","10.191.51.45:9300","10.191.51.46:9300","10.191.51.47:9300"]
discovery.zen.minimum_master_nodes: 2

Data node elasticsearch.yml (shown for 10.191.51.45)

cluster.name: elasticsearch-cluster
node.name: es-node2
network.bind_host: 0.0.0.0
network.publish_host: 10.191.51.45
http.port: 9200
transport.tcp.port: 9300
http.cors.enabled: true
http.cors.allow-origin: "*"
node.master: true
node.data: true
discovery.zen.ping.unicast.hosts: ["10.191.51.44:9300","10.191.51.45:9300","10.191.51.46:9300","10.191.51.47:9300"]
discovery.zen.minimum_master_nodes: 2
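
The data nodes 10.191.51.46 and 10.191.51.47 use the same file with node.name and network.publish_host changed accordingly. Once all four containers are up, the cluster can be checked against the management node, for example:

# list the nodes that have joined the cluster and their roles
curl "http://10.191.51.44:9200/_cat/nodes?v"
# overall cluster health (should report 4 nodes)
curl "http://10.191.51.44:9200/_cluster/health?pretty"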

 

Kafka docker-compose.yml (the environment section must be adjusted for each broker)

version: "2"
services:
  kafka:
    container_name: kafka0
    environment:
      - KAFKA_BROKER_ID=0
      - KAFKA_ZOOKEEPER_CONNECT=10.191.51.44:2181
      - KAFKA_DEFAULT_REPLICATION_FACTOR=3
      - KAFKA_LISTENERS=PLAINTEXT://0.0.0.0:9092
      - KAFKA_ADVERTISED_HOST_NAME=kafka1
      - KAFKA_ADVERTISED_LISTENERS=PLAINTEXT://10.191.51.45:9092
      - KAFKA_delete_topic_enable=true
    image: 10.191.51.5/elk/wurstmeister/kafka:2.1.1
    ports:
      - "9092:9092"

 

Logstash docker-compose.yml

version: "2"
services:
  logstash:
    container_name: logstash
    image: 10.191.51.5/elk/logstash:6.5.4
    volumes:
      - ./config/:/usr/share/logstash/config/
      - ./pipeline/:/usr/share/logstash/pipeline/
    ports:
      - "5044:5044"
      - "9600:9600"

pipeline/logstash.conf 

input {
    kafka {
        bootstrap_servers => ["10.191.51.45:9092,10.191.51.46:9092,10.191.51.47:9092"]
        client_id => "logstash-garnet"
        group_id => "logstash-garnet"
        consumer_threads => 8
        decorate_events => true
        topics => ["garnet_garnetAll_log"]
        type => "garnet_all_log"
    }
}
filter {
    if [type] == "garnet_all_log" {
        mutate {
            gsub => ["message", "@timestamp", "sampling_time"]
        }
        json {
            source => "message"
        }
        grok {
            match => {
                "message" => [
                    "%{TIMESTAMP_ISO8601:log_time}\s+\[(?<thread_name>[a-zA-Z0-9\-\s]*)\]\s+%{LOGLEVEL:log_level}\s+\[(?<class_name>[a-zA-Z0-9.]*)\]\s+%{GREEDYDATA:msg_info}parameters=%{GREEDYDATA:msg_json_info}",
                    "%{TIMESTAMP_ISO8601:log_time}\s+\[(?<thread_name>[a-zA-Z0-9\-\s]*)\]\s+%{LOGLEVEL:log_level}\s+\[(?<class_name>[a-zA-Z0-9.]*)\]\s+%{GREEDYDATA:msg_info}"
                ]
            }
        }
        if [msg_json_info] {
            json {
                source => "msg_json_info"
            }
        }
    }
}
output {
    if [type] == "garnet_all_log" {
        elasticsearch {
            hosts => ["10.191.51.45:9200","10.191.51.46:9200","10.191.51.47:9200"]
            index => "garnet_all-%{+YYYY.MM.dd}"
        }
    }
}
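
Once the pipeline is running, the daily index defined in the output block should show up in Elasticsearch, which can be confirmed with, for example:

# list the indices created by this pipeline
curl "http://10.191.51.45:9200/_cat/indices/garnet_all-*?v"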

config/pipelines.yml (Logstash looks for pipelines.yml in its settings directory)

- pipeline.id: pipeline_1
  pipeline.batch.size: 200
  pipeline.batch.delay: 1
  path.config: /usr/share/logstash/pipeline

config/logstash.yml

log.level: warn
xpack.license.self_generated.type: basic
xpack.monitoring.enabled: true
xpack.monitoring.elasticsearch.url: "http://10.191.51.44:9200"

config/jvm.options changes

-Xms4g
-Xmx4g

 

 

 

Common commands

Change into the directory containing the relevant docker-compose.yml, then:

docker-compose up -d nginx                     Build and start the nginx container

docker-compose exec nginx bash                 Open a shell inside the nginx container

docker-compose down                            Stop and remove the containers and networks created by up

docker-compose ps                              List the containers

docker-compose restart nginx                   Restart the nginx container

docker-compose run --no-deps --rm php-fpm php -v   Run php -v in a one-off php-fpm container without starting linked services; the container is removed when the command finishes

docker-compose build nginx                     Build the nginx image

docker-compose build --no-cache nginx          Build the nginx image without using the cache

docker-compose logs nginx                      Show the nginx container logs

docker-compose logs -f nginx                   Follow the nginx container logs in real time

docker-compose config -q                       Validate the docker-compose.yml; prints nothing when the configuration is correct, otherwise prints the errors

docker-compose events --json nginx             Stream nginx container events as JSON

docker-compose pause nginx                     Pause the nginx container

docker-compose unpause nginx                   Unpause the nginx container

docker-compose rm nginx                        Remove the nginx container (it must be stopped first)

docker-compose stop nginx                      Stop the nginx container

docker-compose start nginx                     Start the nginx container

Original article: https://www.cnblogs.com/lirunzhou/p/10600965.html
