## To enable Yokozuna, set this to 'on'.
yokozuna = off
## The port number which Solr binds to.
yokozuna.solr_port = 10014
## The port number which Solr JMX binds to.
yokozuna.solr_jmx_port = 10013
## The arguments to pass to the Solr JVM. Non-standard
## arguments, i.e. -XX, may not be portable across JVM
## implementations. E.g. -XX:+UseCompressedStrings.
yokozuna.solr_jvm_args = -Xms1g -Xmx1g -XX:+UseStringCache -XX:+UseCompressedOops
## The directory under which to store all Yokozuna related data,
## including the Solr index data.
yokozuna.data_dir = ./data/yz
## Path (relative or absolute) to the working directory for
## the replication process
mdc.data_root = ./data/riak_repl/
## The cluster manager will listen for connections from remote
## clusters on this ip and port. Every node runs one cluster manager,
## but only the cluster manager running on the cluster_leader will
## service requests. This can change as nodes enter and leave the
## cluster. The value is a combination of an IP address
## (**not hostname**) followed by a port number
mdc.cluster_manager = 127.0.0.1:10016
## The hard limit of fullsync workers that will be running on the
## source side of a cluster across all nodes on that cluster for a
## fullsync to a sink cluster. This means if one has configured fullsync
## for two different clusters, both with a max_fssource_cluster of 5, 10
## fullsync workers can be in progress. Only affects nodes on the source
## cluster on which this parameter is defined via the configuration
## file or command line
mdc.max_fssource_cluster = 5
## Limits the number of fullsync workers that will be running on
## each individual node in a source cluster. This is a hard limit for
## all fullsyncs enabled; additional fullsync configurations will not
## increase the number of fullsync workers allowed to run on any node.
## Only affects nodes on the source cluster on which this parameter
## is defined via the configuration file or command line
mdc.max_fssource_node = 1
## Limits the number of fullsync workers allowed to run on each
## individual node in a sink cluster. This is a hard limit for all
## fullsync sources interacting with the sink cluster. Thus, multiple
## simultaneous source connections to the sink cluster will have to
## share the sink node's number of maximum connections. Only affects
## nodes on the sink cluster on which this parameter is defined via
## the configuration file or command line.
mdc.max_fssink_node = 1
## Whether to initiate a fullsync on initial connection from
## the secondary cluster
mdc.fullsync_on_connect = true
## A single integer value representing the duration to wait
## in minutes between fullsyncs, or a list of
## {clustername, time_in_minutes} pairs for each sink
## participating in fullsync replication.
## mdc.fullsync_interval.all = 30m
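## As a sketch, a per-sink interval (assuming a hypothetical sink
## cluster named 'cluster_b') would look like:
## mdc.fullsync_interval.cluster_b = 60m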
## The maximum size the realtime replication queue can grow to
## before new objects are dropped. Defaults to 100MB. Dropped objects
## will need to be replicated with a fullsync.
mdc.rtq_max_bytes = 100MB
## Enable Riak CS proxy_get and block filter.
mdc.proxy_get = off
## A heartbeat message is sent from the source to the sink every
## heartbeat_interval. Setting heartbeat_interval to
## undefined disables the realtime heartbeat. This feature is only
## available in Riak Enterprise 1.3.2+.
mdc.realtime.heartbeat_interval = 15s
## If a heartbeat response is not received within the heartbeat_timeout,
## the source connection exits and will be re-established.
## This feature is only available in Riak Enterprise 1.3.2+.
mdc.realtime.heartbeat_timeout = 15s
## To force Riak to reload SNMP configuration files on startup
snmp.force_reload = true
## Polling interval for stats gathering
snmp.polling_interval = 1m
## Enable or disable replication traps.
snmp.repl_traps = off
## Alarm thresholds for specific stats (microseconds). Each threshold
## name below matches its SNMP object name. The default value of 0
## disables alarms for that stat.
snmp.nodeGetTimeMeanThreshold = 0
snmp.nodeGetTimeMedianThreshold = 0
snmp.nodeGetTime95Threshold = 0
snmp.nodeGetTime99Threshold = 0
snmp.nodeGetTime100Threshold = 0
snmp.nodePutTimeMeanThreshold = 0
snmp.nodePutTimeMedianThreshold = 0
snmp.nodePutTime95Threshold = 0
snmp.nodePutTime99Threshold = 0
snmp.nodePutTime100Threshold = 0
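## For example, since thresholds are in microseconds, the following
## (hypothetical) setting would raise an alarm whenever the mean GET
## time exceeds one second (1,000,000 microseconds):
## snmp.nodeGetTimeMeanThreshold = 1000000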
## Java Management Extensions (JMX) for Riak.
## Set to 'on' to enable.
jmx = off
## leveldb data_root
leveldb.data_root = ./data/leveldb
## This parameter defines the percentage, 1 to 100, of total
## server memory to assign to leveldb. leveldb will dynamically
## adjust its internal cache sizes as Riak activates / deactivates
## vnodes on this server to stay within this size. The memory size
## can alternatively be assigned as a byte count via total_leveldb_mem instead.
leveldb.total_leveldb_mem_percent = 90
## Each database .sst table file can include an optional "bloom filter"
## that is highly effective in shortcutting data queries that are destined
## to not find the requested key. The bloom_filter typically increases the
## size of an .sst table file by about 2%. This option must be set
## to 'on' in riak.conf to take effect.
leveldb.bloomfilter = on
## Set to 'off' to disable the admin panel.
riak_control = off
## Authentication style used for access to the admin
## panel. Valid styles are: off, userlist
riak_control.auth = userlist
## If auth is set to 'userlist' then this is the
## list of usernames and passwords for access to the
## admin panel.
riak_control.user.user.password = pass
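## Additional users follow the same riak_control.user.<username>.password
## pattern, e.g. (hypothetical username and password):
## riak_control.user.admin.password = secret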
## bitcask data root
bitcask.data_root = ./data/bitcask
## Configure how Bitcask writes data to disk.
## erlang: Erlang's built-in file API
## nif: Direct calls to the POSIX C API
## The NIF mode provides higher throughput for certain
## workloads, but has the potential to negatively impact
## the Erlang VM, leading to higher worst-case latencies
## and possible throughput collapse.
bitcask.io_mode = erlang
## enable active anti-entropy subsystem
anti_entropy = on
## Storage_backend specifies the Erlang module defining the storage
## mechanism that will be used on this node.
# storage_backend = bitcask
## raw_name is the first part of all URLs used by the Riak raw HTTP
## interface. See riak_web.erl and raw_http_resource.erl for
## details.
## raw_name = riak
## Restrict how fast AAE can build hash trees. Building the tree
## for a given partition requires a full scan over that partition's
## data. Once built, trees stay built until they are expired.
## Config is of the form:
## {num-builds, per-timespan}
## Default is 1 build per hour.
anti_entropy.build_limit.number = 1
anti_entropy.build_limit.per_timespan = 1h
## Determine how often hash trees are expired after being built.
## Periodically expiring a hash tree ensures the on-disk hash tree
## data stays consistent with the actual k/v backend data. It also
## helps Riak identify silent disk failures and bit rot. However,
## expiration is not needed for normal AAE operation and should be
## infrequent for performance reasons. The time is specified as a
## duration (e.g. '1w'). The default is 1 week.
anti_entropy.expire = 1w
## Limit how many AAE exchanges/builds can happen concurrently.
anti_entropy.concurrency = 2
## The tick determines how often the AAE manager looks for work
## to do (building/expiring trees, triggering exchanges, etc).
## The default is every 15 seconds. Lowering this value will
## speed up the rate at which all replicas are synced across the cluster.
## Increasing the value is not recommended.
anti_entropy.tick = 15s
## The directory where AAE hash trees are stored.
anti_entropy.data_dir = ./data/anti_entropy
## The LevelDB options used by AAE to generate the LevelDB-backed
## on-disk hashtrees.
anti_entropy.write_buffer_size = 4MB
anti_entropy.max_open_files = 20
## mapred_name is the URL path used to submit map/reduce requests to Riak.
mapred_name = mapred
## mapred_2i_pipe indicates whether secondary-index
## MapReduce inputs are queued in parallel via their own
## pipe ('true'), or serially via a helper process
## ('false' or undefined). Set to 'false' or leave
## undefined during a rolling upgrade from 1.0.
mapred_2i_pipe = on
## Each of the following entries controls how many Javascript
## virtual machines are available for executing map, reduce,
## pre- and post-commit hook functions.
javascript_vm.map_count = 8
javascript_vm.reduce_count = 6
javascript_vm.hook_count = 2
## js_max_vm_mem is the maximum amount of memory, in megabytes,
## allocated to the Javascript VMs. If unset, the default is
## 8MB.
javascript_vm.max_vm_mem = 8
## js_thread_stack is the maximum amount of thread stack, in megabytes,
## allocated to the Javascript VMs. If unset, the default is 16MB.
## NOTE: This is not the same as the C thread stack.
javascript_vm.thread_stack = 16
## js_source_dir should point to a directory containing Javascript
## source files which will be loaded by Riak when it initializes
## Javascript VMs.
## javascript_vm.source_dir = /tmp/js_source
## http_url_encoding determines how Riak treats URL encoded
## buckets, keys, and links over the REST API. When set to 'on'
## Riak always decodes encoded values sent as URLs and Headers.
## Otherwise, Riak defaults to compatibility mode where links
## are decoded, but buckets and keys are not. The compatibility
## mode will be removed in a future release.
http_url_encoding = on
## Switch to vnode-based vclocks rather than client ids. This
## significantly reduces the number of vclock entries.
## Only set this to 'on' if *all* nodes in the cluster are upgraded to 1.0.
vnode_vclocks = on
## This option toggles compatibility of keylisting with 1.0
## and earlier versions. Once a rolling upgrade to a version
## > 1.0 is completed for a cluster, this should be set to 'on'
## for better control of memory usage during key listing
## operations.
listkeys_backpressure = on
## This option specifies how many of each type of fsm may exist
## concurrently. This is for overload protection and is a new
## mechanism that obsoletes 1.3's health checks. Note that this number
## represents two potential processes, so +P in vm.args should be at
## least 3X the fsm_limit.
fsm_limit = 50000
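## For example, with the fsm_limit of 50000 above, +P in vm.args
## should be at least 150000 (3 * 50000).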
## retry_put_coordinator_failure will enable/disable the
## 'retry_put_coordinator_failure' per-operation option of the
## put FSM.
## on = Riak 2.0 behavior (strongly recommended)
## off = Riak 1.x behavior
retry_put_coordinator_failure = on
## object_format controls which binary representation of a riak_object
## is stored on disk.
## Current options are: v0, v1.
## v0: Original erlang:term_to_binary format. Higher space overhead.
## v1: New format for more compact storage of small values.
object_format = v1
## listener.http.<name> is an IP address and TCP port to which the Riak
## HTTP interface will bind.
listener.http.internal = 127.0.0.1:10018
## listener.protobuf.<name> is an IP address and TCP port to which the Riak
## Protocol Buffers interface will bind.
listener.protobuf.internal = 127.0.0.1:10017
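## Additional listeners can be declared under other names; as a
## sketch (the 'external' name and address are hypothetical):
## listener.http.external = 0.0.0.0:10028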
## pb_backlog is the maximum length to which the queue of pending
## connections may grow. If set, it must be an integer >= 0.
## By default the value is 5. If you anticipate a huge number of
## connections being initialised *simultaneously*, set this number
## higher.
## protobuf.backlog = 64
## listener.https.<name> is an IP address and TCP port to which the Riak
## HTTPS interface will bind.
## listener.https.internal = 127.0.0.1:10018
## Default ring creation size. Make sure it is a power of 2,
## e.g. 16, 32, 64, 128, 256, 512, etc.
## ring_size = 64
## Default location of ringstate
ring.state_dir = ./data/ring
## Default cert location for https can be overridden
## with the ssl config variable, for example:
## ssl.certfile = ./etc/cert.pem
## Default key location for https can be overridden
## with the ssl config variable, for example:
## ssl.keyfile = ./etc/key.pem
## Default signing authority location for https can be overridden
## with the ssl config variable, for example:
## ssl.cacertfile = ./etc/cacertfile.pem
## handoff.port is the TCP port that Riak uses for
## intra-cluster data handoff.
handoff.port = 10019
## To encrypt riak_core intra-cluster data handoff traffic,
## uncomment the following line and edit its path to an
## appropriate certfile and keyfile. (This example uses a
## single file with both items concatenated together.)
## handoff.ssl.certfile = /tmp/erlserver.pem
## DTrace support
## Do not enable 'dtrace' unless your Erlang/OTP
## runtime is compiled to support DTrace. DTrace is
## available in R15B01 (supported by the Erlang/OTP
## official source package) and in R14B04 via a custom
## source repository & branch.
dtrace = off
platform_bin_dir = ./bin
platform_data_dir = ./data
platform_etc_dir = ./etc
platform_lib_dir = ./lib
platform_log_dir = ./log
## Where do you want the console.log output:
## off : nowhere
## file : the file specified by log.console.file
## console : standard out
## both : log.console.file and standard out.
log.console = both
## the log level of the console log
log.console.level = info
## location of the console log
log.console.file = ./log/console.log
## location of the error log
log.error.file = ./log/error.log
## turn on syslog
log.syslog = off
## Whether to write a crash log, and where.
## Commented/omitted/undefined means no crash logger.
log.crash.file = ./log/crash.log
## Maximum size in bytes of events in the crash log - defaults to 65536
log.crash.msg_size = 64KB
## Maximum size of the crash log in bytes, before it's rotated; set
## to 0 to disable rotation - default is 0
log.crash.size = 10MB
## What time to rotate the crash log - default is no time
## rotation. See the lager README for a description of this format:
## https://github.com/basho/lager/blob/master/README.org
log.crash.date = $D0
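## ($D0 uses lager's date spec syntax: rotate daily at hour 0,
## i.e. midnight.)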
## Number of rotated crash logs to keep, 0 means keep only the
## current one - default is 0
log.crash.count = 5
## Whether to redirect error_logger messages into lager - defaults to true
log.error.redirect = on
## maximum number of error_logger messages to handle in a second
## lager 2.0.0 shipped with a limit of 50, which is a little low for Riak's startup
log.error.messages_per_second = 100
## Name of the riak node
nodename = dev1@127.0.0.1
## Cookie for distributed node communication. All nodes in the same cluster
## should use the same cookie or they will not be able to communicate.
distributed_cookie = riak
## Size of the Erlang VM's async thread pool
erlang.async_threads = 64
## Increase number of concurrent ports/sockets
erlang.max_ports = 64000
## Set the location of crash dumps
erlang.crash_dump = ./log/erl_crash.dump
## Raise the ETS table limit
erlang.max_ets_tables = 256000
## Raise the default erlang process limit
erlang.process_limit = 256000
## For nodes with many busy_dist_port events, Basho recommends
## raising the sender-side network distribution buffer size.
## 32MB may not be sufficient for some workloads and is a suggested
## starting point.
## The Erlang/OTP default is 1024 (1 megabyte).
## See: http://www.erlang.org/doc/man/erl.html#%2bzdbbl
## erlang.zdbbl = 32768
## Erlang VM scheduler tuning.
## Prerequisite: a patched VM from Basho, or a VM compiled separately
## with this patch applied:
## https://gist.github.com/evanmcc/a599f4c6374338ed672e
## erlang.sfwi = 500
## Reading or writing objects bigger than this size will write
## a warning in the logs.
warn_object_size = 5MB
## Writing an object bigger than this will fail.
max_object_size = 50MB
## Writing an object with more than this number of siblings
## will generate a warning in the logs.
warn_siblings = 25
## Writing an object with more than this number of siblings
## will fail.
max_siblings = 100