Gist by @DiogoAndre, created December 15, 2017 11:24
[2017-12-15T11:13:12,932][INFO ][logstash.modules.scaffold] Initializing module {:module_name=>"fb_apache", :directory=>"/data/elk/logstash-6.0.1/modules/fb_apache/configuration"}
[2017-12-15T11:13:12,935][DEBUG][logstash.plugins.registry] Adding plugin to the registry {:name=>"fb_apache", :type=>:modules, :class=>#<LogStash::Modules::Scaffold:0x7309d925 @module_name="fb_apache", @directory="/data/elk/logstash-6.0.1/modules/fb_apache/configuration", @kibana_version_parts=["6", "0", "0"]>}
[2017-12-15T11:13:12,935][INFO ][logstash.modules.scaffold] Initializing module {:module_name=>"netflow", :directory=>"/data/elk/logstash-6.0.1/modules/netflow/configuration"}
[2017-12-15T11:13:12,936][DEBUG][logstash.plugins.registry] Adding plugin to the registry {:name=>"netflow", :type=>:modules, :class=>#<LogStash::Modules::Scaffold:0x6064ef34 @module_name="netflow", @directory="/data/elk/logstash-6.0.1/modules/netflow/configuration", @kibana_version_parts=["6", "0", "0"]>}
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] -------- Logstash Settings (* means modified) ---------
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] node.name: "elk-net-bt01-mn"
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] *path.data: "/data/logstash" (default: "/data/elk/logstash-6.0.1/data")
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] modules.cli: []
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] *modules: [{"name"=>"netflow", "var.elasticsearche.hosts"=>"localhost:9200", "var.kibana.hosts."=>"10.10.10.10:5601", "var.input.udp.port"=>2055}] (default: [])
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] modules_setup: false
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] config.test_and_exit: false
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] config.reload.automatic: false
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] config.reload.interval: 3000000000
[2017-12-15T11:13:13,090][DEBUG][logstash.runner ] config.support_escapes: false
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] metric.collect: true
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.id: "main"
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.system: false
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.workers: 6
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.output.workers: 1
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.batch.size: 125
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.batch.delay: 5
[2017-12-15T11:13:13,091][DEBUG][logstash.runner ] pipeline.unsafe_shutdown: false
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] pipeline.reloadable: true
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] path.plugins: []
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] *config.debug: true (default: false)
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] *log.level: "debug" (default: "info")
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] version: false
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] help: false
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] log.format: "plain"
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] http.host: "127.0.0.1"
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] http.port: 9600..9700
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] http.environment: "production"
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] queue.type: "memory"
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] queue.drain: false
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] queue.page_capacity: 262144000
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] queue.max_bytes: 1073741824
[2017-12-15T11:13:13,092][DEBUG][logstash.runner ] queue.max_events: 0
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] queue.checkpoint.acks: 1024
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] queue.checkpoint.writes: 1024
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] queue.checkpoint.interval: 1000
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] dead_letter_queue.enable: false
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] dead_letter_queue.max_bytes: 1073741824
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] slowlog.threshold.warn: -1
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] slowlog.threshold.info: -1
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] slowlog.threshold.debug: -1
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] slowlog.threshold.trace: -1
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] *path.queue: "/data/logstash/queue" (default: "/data/elk/logstash-6.0.1/data/queue")
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] *path.dead_letter_queue: "/data/logstash/dead_letter_queue" (default: "/data/elk/logstash-6.0.1/data/dead_letter_queue")
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] *path.settings: "/etc/logstash" (default: "/data/elk/logstash-6.0.1/config")
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] *path.logs: "/data/log/logstash" (default: "/data/elk/logstash-6.0.1/logs")
[2017-12-15T11:13:13,093][DEBUG][logstash.runner ] --------------- Logstash Settings -------------------
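Note: the starred entries in the block above are the settings that differ from their defaults. A minimal logstash.yml sketch that would reproduce them is shown below; it assumes the values were set in the settings file under path.settings (/etc/logstash) rather than on the command line, and the queue and dead letter queue paths follow path.data automatically:

    path.data: /data/logstash
    path.logs: /data/log/logstash
    log.level: debug
    config.debug: true
    modules:
      - name: netflow
        var.elasticsearche.hosts: "localhost:9200"   # variable names copied verbatim from the log
        var.kibana.hosts.: "10.10.10.10:5601"        # (see the note after the log)
        var.input.udp.port: 2055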
[2017-12-15T11:13:13,101][DEBUG][logstash.config.source.multilocal] Reading pipeline configurations from YAML {:location=>"/etc/logstash/pipelines.yml"}
[2017-12-15T11:13:13,104][WARN ][logstash.config.source.multilocal] Ignoring the 'pipelines.yml' file because modules or command line options are specified
[2017-12-15T11:13:13,125][DEBUG][logstash.agent ] Agent: Configuring metric collection
[2017-12-15T11:13:13,132][DEBUG][logstash.instrument.periodicpoller.os] PeriodicPoller: Starting {:polling_interval=>5, :polling_timeout=>120}
[2017-12-15T11:13:13,135][DEBUG][logstash.instrument.periodicpoller.cgroup] Error, cannot retrieve cgroups information {:exception=>"Errno::ENOENT", :message=>"No such file or directory - /sys/fs/cgroup/cpuacct/user/11705.user/17.session/cpu.cfs_period_us"}
[2017-12-15T11:13:13,154][DEBUG][logstash.instrument.periodicpoller.jvm] PeriodicPoller: Starting {:polling_interval=>5, :polling_timeout=>120}
[2017-12-15T11:13:13,198][DEBUG][logstash.instrument.periodicpoller.persistentqueue] PeriodicPoller: Starting {:polling_interval=>5, :polling_timeout=>120}
[2017-12-15T11:13:13,201][DEBUG][logstash.instrument.periodicpoller.deadletterqueue] PeriodicPoller: Starting {:polling_interval=>5, :polling_timeout=>120}
[2017-12-15T11:13:13,209][DEBUG][logstash.agent ] starting agent
[2017-12-15T11:13:13,214][DEBUG][logstash.agent ] Starting puma
[2017-12-15T11:13:13,220][DEBUG][logstash.agent ] Trying to start WebServer {:port=>9600}
[2017-12-15T11:13:13,223][DEBUG][logstash.config.modulescommon] Specified modules {:modules_array=>"[{\"name\"=>\"netflow\", \"var.elasticsearche.hosts\"=>\"localhost:9200\", \"var.kibana.hosts.\"=>\"10.10.10.10:5601\", \"var.input.udp.port\"=>2055}]"}
[2017-12-15T11:13:13,232][DEBUG][logstash.api.service ] [api-service] start
[2017-12-15T11:13:13,282][INFO ][logstash.agent ] Successfully started Logstash API endpoint {:port=>9600}
[2017-12-15T11:13:13,297][DEBUG][logstash.agent ] Converging pipelines
[2017-12-15T11:13:13,298][DEBUG][logstash.agent ] Needed actions to converge {:actions_count=>1}
[2017-12-15T11:13:13,300][DEBUG][logstash.agent ] Executing action {:action=>LogStash::PipelineAction::Create/pipeline_id:module-netflow}
[2017-12-15T11:13:18,162][DEBUG][logstash.instrument.periodicpoller.cgroup] Error, cannot retrieve cgroups information {:exception=>"Errno::ENOENT", :message=>"No such file or directory - /sys/fs/cgroup/cpuacct/user/11705.user/17.session/cpu.cfs_period_us"}
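Note: the netflow module variables recorded in the log, var.elasticsearche.hosts and var.kibana.hosts., do not match the variable names I would expect from the Logstash 6.x module documentation, which to my recollection uses var.elasticsearch.hosts and var.kibana.host. Treating those documented names as an assumption, the equivalent modules block would be:

    modules:
      - name: netflow
        var.elasticsearch.hosts: "localhost:9200"   # assumed documented spelling (no trailing "e")
        var.kibana.host: "10.10.10.10:5601"         # assumed documented key (singular, no trailing dot)
        var.input.udp.port: 2055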