Skip to content

Instantly share code, notes, and snippets.

@denzuko
Created August 29, 2017 21:14
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save denzuko/f3a5eb20d1bbab6bed2a53f21a1be255 to your computer and use it in GitHub Desktop.
Save denzuko/f3a5eb20d1bbab6bed2a53f21a1be255 to your computer and use it in GitHub Desktop.

Expected results:

  • messages coming in from snmp-trap and syslog passed to elastic search's logstash-* index and displayed in kibana

Actual results:

  • pcap shows traffic coming into logstash container but no traffic going out
  • nmap escuster shows an open port
  • curl http://escluster:9200/*/ shows .kibana and nothing else

Additional details:

docker-compose.yml:


---
version: '2.1'
services:
    # data containers
    es_data:
        pids_limit: 1
        ulimits:
            nproc: 2
            nofile:
                soft: 65535
                hard: 65535
        healthcheck:
            test: "exit 0"
        restart: "no"
        image: alpine:latest
        command: "/bin/true"
        volumes:
            - /data/logs
            - /data/data
            - /data/plugins

    escluster:
        cap_add:
            - SYS_RESOURCE
            - IPC_LOCK
            - IPC_OWNER
        privileged: true
        memswap_limit: 8g
        mem_limit: 8g
        mem_swappiness: 0
        cpu_shares: 20
        security_opt:
            - seccomp=unconfined
        ulimits:
            nofile: 65536
            memlock:
                soft: 204800000
                hard: 307200000
        restart: "on-failure:5"
        image: 'elasticsearch:5-alpine'
        environment:
            - "ES_MAX_LOCKED_MEMORY=4Gb"
            - "ES_JAVA_OPTS=-Xms4G -Xmx4G -Djava.security.egd=file:/dev/./urandom"
            - "affinity:com.vzwnet.eng.win.application=monitoring"
            - "VIRTUAL_HOST=search.local"
            - "VIRTUAL_PORT=9200"
            - "cluster.name=monitoring"
            - "bootstrap.memory_lock=true"
            - "discovery.type=zen"
            - "discovery.zen.ping.unicast.hosts=escluster"
            - "discovery.zen.minimum_master_nodes=2"
            - "discovery.zen.ping.multicast.enabled=false"
            - "gateway.expected_nodes=3"
            - "gateway.recover_after_nodes=2"
            - "network.bind=_eth0:ipv4"
            - "network.bind_host=0.0.0.0"
            - "network.publish_host=search.local:443"
            - "http.host=0.0.0.0"
            - "http.port=9200"
            - "http.cors.enabled=false"
            - "http.cors.allow-origin=*"
            - "transport.host=0.0.0.0"
            - "transport.port=9300"
            - 'node.name=$${HOSTNAME}'
            - "es.bootstrap.seccomp=false"
            - "xpack.security.enabled=false"
            - "xpack.monitoring.enabled=false"
            - "xpack.ml.enabled=false"
            - "xpack.graph.enabled=false"
            - "xpack.watcher.enabled=false"
        volumes:
            - ./config:/data/config
        volumes_from:
            - es_data
        ports:
            - "9200"
            - "9300"
        expose:
            - 9200
            - 9300
        healthcheck:
            test:
                - "CMD"
                - "wget"
                - "-q"
                - "-O"
                - "/dev/null"
                - "https://syslog.local/_cat/health?pretty"
            interval: 1m30s
            timeout: 10s
            retries: 3
        stdin_open: true
        tty: true
        networks:
            - default
        labels:
            - domain.application="monitoring"
            - domain.role="collector"

    logstash:
        restart: "on-failure:5"
        image: 'logstash:latest'
        volumes:
            - "./data/logstash/pipeline:/opt/logstash/pipeline/logstash.conf:ro"
        build:
            context: "data/logstash/."
        environment:
            - "LS_JAVA_OPTS=-Xms1G -Xmx1G -Djava.security.egd=file:/dev/./urandom"
            - "xpack.monitoring.enabled=false"
            - "VIRTUAL_HOST=collector.local"
            - "VIRTUAL_PORT=8000"
        links:
            - "escluster"
        labels:
            - domain.application="monitoring"
            - domain.role="collector"
        networks:
            - default
        depends_on:
            escluster:
                condition: service_started
        volumes:
            - "/var/log:/var/log:ro"
        ports:
            - "1512:5000/tcp"
            - "1512:5000/udp"
            - "156:156/tcp"
            - "156:156/udp"
            - "514:1514/tcp"
            - "514:1514/udp"
        entrypoint: "/opt/logstash/bin/logstash"
        command:
            - '--debug'
            - '-w'
            - '1'
            - '-f'
            - '/opt/logstash/pipeline/logstash.conf'

    kibana:
        memswap_limit: 4g
        mem_limit: 4g
        pids_limit: 100
        ulimits:
            nproc: 65535
            memlock:
                soft: -1
                hard: -1
            nofile:
                soft: 20000
                hard: 40000
        restart: "on-failure:5"
        image: 'kibana:5'
        environment:
            VIRTUAL_HOST: "syslog.local"
            VIRTUAL_PORT: "5601"
            SERVER_NAME: 'syslog.local'
            SERVER_HOST: '0.0.0.0'
            SERVER_PORT": "5601"
            ELASTICSEARCH_URL: "http://escluster:9200/"
            XPACK_SECURITY_ENABLED: "false"
            XPACK_MONITORING_ENABLED: "false"
            XPACK_ML_ENABLED: "false"
            XPACK_GRAPH_ENABLED: "false"
            XPACK_REPORTING_ENABLED: "false"
            ES_JAVA_OPTS: "-Xms4G -Xmx4G -Djava.security.egd=file:/dev/./urandom"
        links:
            - "escluster:elasticsearch"
        volumes:
            - /dev/urandom:/dev/random:ro
        labels:
            - domain.application="monitoring"
            - domain.role="database"
        networks:
            - default
        depends_on:
            escluster:
                condition: service_started

# vimrc: set makeprg=docker-compose :

data/logstash/pipeline:

#####
###
#

input {

    snmptrap {
        community => "public"
        host => "0.0.0.0"
        port => 162
        type => "snmptrap"
    }

    syslog {
        port => 1514
        type => "syslog"
    }

}

filter {

  # Merge stacktraces and Jersey logs
  multiline {
      pattern => "^\s+|^Caused by:|^%{JAVACLASS:class}:|^%{NUMBER} < |^%{NUMBER} > [^GET|POST|PUT|DELETE|PATCH]"
      what => "previous"
  }

  # Parse logback messages
  grok {
      match => { "message" => "%{TIMESTAMP_ISO8601:ts}\s+\[%{GREEDYDATA:thread}\]\s+%{WORD:level}\s+%{JAVACLASS:class}" }
      add_field => { "subType" => "java" }
      remove_tag => ["_grokparsefailure"]
  }

  # InfluxDB
  grok {
      match => { "message" => "\[%{WORD:subsystem}\] (?<ts>[0-9]{4}/[0-9]{2}/[0-9]{2} [0-9]{2}:[0-9]{2}:[0-9]{2})" }
      add_field => { "subType" => "influxdb" }
      remove_tag => ["_grokparsefailure"]
  }

  date {
      match => [ "ts", "YYYY-MM-dd HH:mm:ss,SSS", "YYYY/MM/dd HH:mm:ss" ]
      # Use the log timestamp to get sub-second precision (useful for ordering)
      target => "@timestamp"
      # Remove the ts field as it confuses Elasticsearch (dynamic mapping misses some date formats)
      remove_field => [ "ts" ]
  }

}

output {

    elasticsearch {
        hosts => ["escluster:9200"]
        sniffing => true
        ssl => false
        ssl_certificate_verification => false
    }

    stdout {
        codec => rubydebug
    }

    file {
            path => "/var/log/logstash.log"
            codec => "plain"
    }

}

##
# vim:et:si:ts=4:sts=4:sw=4:

logstash logs:

logstash_1        | {:timestamp=>"2017-08-29T19:03:06.137000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@hosts = [\"http://escluster:9200\"]", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.139000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@index = \"syslog-%{+YYYY.MM}\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.141000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@codec = <LogStash::Codecs::Plain charset=>\"UTF-8\">", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.142000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@workers = 1", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.144000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@manage_template = true", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.146000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@template_name = \"logstash\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.147000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@template_overwrite = false", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.149000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@parent = nil", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.150000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@flush_size = 500", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.152000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@idle_flush_time = 1", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.154000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@upsert = \"\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.155000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@doc_as_upsert = false", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.157000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@max_retries = 3", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.159000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@script = \"\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.160000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@script_type = \"inline\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.162000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@script_lang = \"\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.163000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@script_var_name = \"event\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.165000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@scripted_upsert = false", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.167000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@retry_max_interval = 2", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.168000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@retry_max_items = 500", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.170000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@retry_on_conflict = 1", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.172000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@pipeline = nil", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.173000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@action = \"index\"", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.175000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@ssl_certificate_verification = true", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.176000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@sniffing = false", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.178000+0000", :message=>"config LogStash::Outputs::ElasticSearch/@sniffing_delay = 5", :level=>:debug, :file=>"logstash/config/mixin.rb", :line=>"154", :method=>"config_init"}
logstash_1        | {:timestamp=>"2017-08-29T19:03:06.180000+0000", :message=>"Normalizing http path", :path=>nil, :normalized=>nil, :level=>:debug, :file=>"logstash/outputs/elasticsearch/http_client_builder.rb", :line=>"18", :method=>"build"}

...
logstash_1        | {:timestamp=>"2017-08-29T20:10:51.720000+0000", :message=>"Flushing buffer at interval", :instance=>"#<LogStash::Outputs::ElasticSearch::Buffer"...

(last line repeats)

elastic search logs:

Attaching to foundationdeployment_escluster_1
�[36mescluster_1       |�[0m [2017-08-29T18:34:43,958][INFO ][o.e.n.Node               ] [] initializing ...
�[36mescluster_1       |�[0m [2017-08-29T18:34:44,026][WARN ][o.e.d.e.NodeEnvironment  ] ES has detected the [path.data] folder using the cluster name as a folder [/usr/share/elasticsearch/data], Elasticsearch 6.0 will not allow the cluster name as a folder within the data path
�[36mescluster_1       |�[0m [2017-08-29T18:34:44,205][INFO ][o.e.e.NodeEnvironment    ] [51wXKZq] using [1] data paths, mounts [[/usr/share/elasticsearch/data (/dev/vda1)]], net usable_space [57.7gb], net total_space [77.4gb], spins? [possibly], types [ext4]
�[36mescluster_1       |�[0m [2017-08-29T18:34:44,206][INFO ][o.e.e.NodeEnvironment    ] [51wXKZq] heap size [3.9gb], compressed ordinary object pointers [true]
�[36mescluster_1       |�[0m [2017-08-29T18:34:44,221][INFO ][o.e.n.Node               ] node name [51wXKZq] derived from node ID [51wXKZqpQR6GoPoa-8121Q]; set [node.name] to override
�[36mescluster_1       |�[0m [2017-08-29T18:34:44,221][INFO ][o.e.n.Node               ] version[5.5.2], pid[1], build[b2f0c09/2017-08-14T12:33:14.154Z], OS[Linux/4.4.0-87-generic/amd64], JVM[Oracle Corporation/OpenJDK 64-Bit Server VM/1.8.0_131/25.131-b11]
�[36mescluster_1       |�[0m [2017-08-29T18:34:44,222][INFO ][o.e.n.Node               ] JVM arguments [-Xms2g, -Xmx2g, -XX:+UseConcMarkSweepGC, -XX:CMSInitiatingOccupancyFraction=75, -XX:+UseCMSInitiatingOccupancyOnly, -XX:+AlwaysPreTouch, -Xss1m, -Djava.awt.headless=true, -Dfile.encoding=UTF-8, -Djna.nosys=true, -Djdk.io.permissionsUseCanonicalPath=true, -Dio.netty.noUnsafe=true, -Dio.netty.noKeySetOptimization=true, -Dio.netty.recycler.maxCapacityPerThread=0, -Dlog4j.shutdownHookEnabled=false, -Dlog4j2.disable.jmx=true, -Dlog4j.skipJansi=true, -XX:+HeapDumpOnOutOfMemoryError, -Xms4G, -Xmx4G, -Djava.security.egd=file:/dev/./urandom, -Des.path.home=/usr/share/elasticsearch]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,498][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [aggs-matrix-stats]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,498][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [ingest-common]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,498][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [lang-expression]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,498][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [lang-groovy]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,498][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [lang-mustache]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,499][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [lang-painless]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,499][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [parent-join]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,499][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [percolator]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,499][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [reindex]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,499][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [transport-netty3]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,500][INFO ][o.e.p.PluginsService     ] [51wXKZq] loaded module [transport-netty4]
�[36mescluster_1       |�[0m [2017-08-29T18:34:46,500][INFO ][o.e.p.PluginsService     ] [51wXKZq] no plugins loaded
�[36mescluster_1       |�[0m [2017-08-29T18:34:50,727][INFO ][o.e.d.DiscoveryModule    ] [51wXKZq] using discovery type [zen]
�[36mescluster_1       |�[0m [2017-08-29T18:34:51,835][INFO ][o.e.n.Node               ] initialized
�[36mescluster_1       |�[0m [2017-08-29T18:34:51,835][INFO ][o.e.n.Node               ] [51wXKZq] starting ...
�[36mescluster_1       |�[0m [2017-08-29T18:34:52,331][INFO ][o.e.t.TransportService   ] [51wXKZq] publish_address {127.0.0.1:9300}, bound_addresses {127.0.0.1:9300}
�[36mescluster_1       |�[0m [2017-08-29T18:34:52,367][WARN ][o.e.b.BootstrapChecks    ] [51wXKZq] max virtual memory areas vm.max_map_count [65530] is too low, increase to at least [262144]
�[36mescluster_1       |�[0m [2017-08-29T18:34:55,462][INFO ][o.e.c.s.ClusterService   ] [51wXKZq] new_master {51wXKZq}{51wXKZqpQR6GoPoa-8121Q}{l_NojvkrS2y5HM4HAewwtA}{127.0.0.1}{127.0.0.1:9300}, reason: zen-disco-elected-as-master ([0] nodes joined)
�[36mescluster_1       |�[0m [2017-08-29T18:34:55,523][INFO ][o.e.h.n.Netty4HttpServerTransport] [51wXKZq] publish_address {172.18.0.3:9200}, bound_addresses {0.0.0.0:9200}
�[36mescluster_1       |�[0m [2017-08-29T18:34:55,523][INFO ][o.e.n.Node               ] [51wXKZq] started
�[36mescluster_1       |�[0m [2017-08-29T18:34:55,946][INFO ][o.e.g.GatewayService     ] [51wXKZq] recovered [1] indices into cluster_state
�[36mescluster_1       |�[0m [2017-08-29T18:34:56,441][INFO ][o.e.c.r.a.AllocationService] [51wXKZq] Cluster health status changed from [RED] to [YELLOW] (reason: [shards started [[.kibana][0]] ...]).
�[36mescluster_1       |�[0m [2017-08-29T18:36:10,876][INFO ][o.e.m.j.JvmGcMonitorService] [51wXKZq] [gc][79] overhead, spent [288ms] collecting in the last [1s]

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment