Skip to content

Instantly share code, notes, and snippets.

@mingder78
Created December 7, 2016 15:23
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save mingder78/335ccd66deafd809d2dbde8fbd41f6c7 to your computer and use it in GitHub Desktop.
Save mingder78/335ccd66deafd809d2dbde8fbd41f6c7 to your computer and use it in GitHub Desktop.
Elasticsearch cluster installed by Chef
[mwang@ELKServer02-tw elasticsearch]$ ls
elasticsearch.yml logging.yml scripts
[mwang@ELKServer02-tw elasticsearch]$ vi elasticsearch.yml
[mwang@ELKServer02-tw elasticsearch]$ cat *yml
# Index defaults: each new index gets 3 primary shards, each with 1 replica.
index.number_of_shards: 3
index.number_of_replicas: 1
# Search thread-pool queue capacity; search requests beyond this are rejected.
threadpool.search.queue_size: 10000
# NOTE(review): 0.0.0.0 binds on ALL interfaces — confirm this host is not
# reachable from untrusted networks (stock Elasticsearch has no authentication).
network.host: 0.0.0.0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# THIS FILE IS MANAGED BY CHEF, DO NOT EDIT MANUALLY, YOUR CHANGES WILL BE OVERWRITTEN!
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# See the source file for context and more information:
# <https://github.com/elastic/elasticsearch/blob/master/core/config/elasticsearch.yml>
#
# See the documentation for further information on configuration options:
# <http://www.elastic.co/guide/en/elasticsearch/reference/current/setup-configuration.html>
#
# To set configurations not exposed by this template, set the `configuration`
# parameter in your `elasticsearch_configure` resource, as a hash of dotted keys
# and string values.
#
# // ...
# 'threadpool.index.type' => 'fixed',
# 'threadpool.index.size' => '2'
# // ...
#
# ---------------------------------- Cluster -----------------------------------
#
# Nodes join a cluster by matching cluster.name; this node is both
# master-eligible and a data node.
cluster.name: elasticsearch-wits-tw
node.master: true
node.data: true
#
# ------------------------------------ Node ------------------------------------
#
# Human-readable node name (matches the host); only one node may use this
# machine's data path.
node.name: ELKServer02-tw
node.max_local_storage_nodes: 1
#
# ----------------------------------- Paths ------------------------------------
#
# Config lives in the default location; indexes and logs are kept on /diskZ.
path.conf: /etc/elasticsearch
path.data: /diskZ/var/es_indexes
path.logs: /diskZ/var/log/elasticsearch
#
# ----------------------------------- Memory -----------------------------------
#
#
# ---------------------------------- Network -----------------------------------
#
# Default REST/HTTP port.
http.port: 9200
#
# ---------------------------------- Gateway -----------------------------------
#
#
# --------------------------------- Discovery ----------------------------------
#
# Multicast is disabled; nodes are discovered only via the explicit unicast list.
discovery.zen.ping.multicast.enabled: false
# NOTE(review): with two master-eligible nodes in the unicast list, the
# recommended value is (masters / 2) + 1 = 2; a value of 1 risks split-brain
# if the nodes partition — confirm this is intentional.
discovery.zen.minimum_master_nodes: 1
discovery.zen.ping.unicast.hosts: ["ELKServer02-tw", "ELKServer01-tw:9301"]
#
# ---------------------------------- Various -----------------------------------
#
# Require explicit index names for destructive operations (no `DELETE /_all`).
action.destructive_requires_name: true
#
# -------------------------- Custom Chef Configuration --------------------------
#
gateway.expected_nodes: 0
# ------------------------------- logging.yml --------------------------------
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# THIS FILE IS MANAGED BY CHEF, DO NOT EDIT MANUALLY, YOUR CHANGES WILL BE OVERWRITTEN!
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# See the source file for context and more information:
# <https://github.com/elastic/cookbook-elasticsearch/blob/master/templates/default/logging.yml.erb>
#
# You may also supply your own template to the elasticsearch cookbook's
# elasticsearch_configure resource using the template_logging_yml and cookbook_logging_yml
# parameters defined here:
# <https://github.com/elastic/cookbook-elasticsearch/blob/master/libraries/resource_configure.rb#L48-L49>
#
# You can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
rootLogger: ${es.logger.level}, console, file
logger:
  # log action execution errors for easier debugging
  action: DEBUG
  # deprecation logging, turn to DEBUG to see them
  deprecation: INFO, deprecation_log_file
  # reduce the logging for aws, too much is logged under the default INFO
  com.amazonaws: WARN
  # aws will try to do some sketchy JMX stuff, but it's not needed
  com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
  com.amazonaws.metrics.AwsSdkMetrics: ERROR
  org.apache.http: INFO
  # gateway
  #gateway: DEBUG
  #index.gateway: DEBUG
  # peer shard recovery
  #indices.recovery: DEBUG
  # discovery
  #discovery: TRACE
  index.search.slowlog: TRACE, index_search_slow_log_file
  index.indexing.slowlog: TRACE, index_indexing_slow_log_file

# ----- Configuration set by Chef ---------------------------------------------
# -----------------------------------------------------------------------------

# Keep slowlog/deprecation events out of the root logger's appenders so each
# category goes only to its dedicated file below.
additivity:
  index.search.slowlog: false
  index.indexing.slowlog: false
  deprecation: false

appender:
  console:
    type: console
    layout:
      type: consolePattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      # %.10000m truncates each log message to 10000 characters
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"

  # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
  # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
  #file:
  #  type: extrasRollingFile
  #  file: ${path.logs}/${cluster.name}.log
  #  rollingPolicy: timeBased
  #  rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
  #  layout:
  #    type: pattern
  #    conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  deprecation_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_deprecation.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  index_search_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_search_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"

  index_indexing_slow_log_file:
    type: dailyRollingFile
    file: ${path.logs}/${cluster.name}_index_indexing_slowlog.log
    datePattern: "'.'yyyy-MM-dd"
    layout:
      type: pattern
      conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment