# Redis configuration file
daemonize yes
pidfile /var/run/redis_util.pid
port 6379
# Close the connection after a client is idle for N seconds (0 to disable)
timeout 60
# Set server verbosity to 'debug'
# it can be one of:
# debug (a lot of information, useful for development/testing)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
loglevel warning
# Specify the log file name. Also 'stdout' can be used to force
# the daemon to log on the standard output. Note that if you use standard
# output for logging but daemonize, logs will be sent to /dev/null
logfile /data/redis/redis.log
# Set the number of databases. The default database is DB 0, you can select
# a different one on a per-connection basis using SELECT <dbid> where
# dbid is a number between 0 and 'databases'-1
databases 16
################################ SNAPSHOTTING #################################
#
# Save the DB on disk:
#
# save <seconds> <changes>
#
# Will save the DB if both the given number of seconds and the given
# number of write operations against the DB occurred.
#
# In the example below the behaviour will be to save:
# after 900 sec (15 min) if at least 1 key changed
# after 300 sec (5 min) if at least 10 keys changed
# after 60 sec if at least 10000 keys changed
#save 900 1
#save 300 10
#save 60 10000
# Compress string objects using LZF when dumping .rdb databases?
# By default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
# FOR 1.2.1
rdbcompression yes
# The filename where to dump the DB
dbfilename redis_state.rdb
# By default save/load the DB in/from the working directory
dir /data/redis
################################# REPLICATION #################################
# Master-Slave replication. Use slaveof to make a Redis instance a copy of
# another Redis server. Note that the configuration is local to the slave
# so for example it is possible to configure the slave to save the DB with a
# different interval, or to listen to another port, and so on.
#
# slaveof <masterip> <masterport>
# If the master is password protected (using the "requirepass" configuration
# directive below) it is possible to tell the slave to authenticate before
# starting the replication synchronization process, otherwise the master will
# refuse the slave request.
#
# masterauth <master-password>
# When a slave loses the connection with the master, or when the replication
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
# still reply to client requests, possibly with out-of-date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale-data is set to 'no' the slave will reply with
# an error "SYNC with master in progress" to all kinds of commands
# except INFO and SLAVEOF.
#
slave-serve-stale-data yes
maxmemory 16500mb
maxmemory-policy allkeys-lru
maxmemory-samples 10
############################## APPEND ONLY MODE ###############################
# By default Redis asynchronously dumps the dataset on disk. If you can live
# with the idea that the latest records will be lost if something like a crash
# happens this is the preferred way to run Redis. If instead you care a lot
# about your data and don't want a single record to get lost you should
# enable the append only mode: when this mode is enabled Redis will append
# every write operation received in the file appendonly.aof. This file will
# be read on startup in order to rebuild the full dataset in memory.
#
# Note that you can have both the async dumps and the append only file if you
# like (you have to comment the "save" statements above to disable the dumps).
# Still if append only mode is enabled Redis will load the data from the
# log file at startup ignoring the dump.rdb file.
#
# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
# log file in background when it gets too big.
appendonly yes
# The name of the append only file (default: "appendonly.aof")
appendfilename appendonly.aof
appendfsync no
no-appendfsync-on-rewrite no
# Automatic rewrite of the append only file.
# Redis is able to automatically rewrite the log file implicitly calling
# BGREWRITEAOF when the AOF log size grows by the specified percentage.
#
# This is how it works: Redis remembers the size of the AOF file after the
# latest rewrite (or if no rewrite happened since the restart, the size of
# the AOF at startup is used).
#
# This base size is compared to the current size. If the current size is
# bigger than the specified percentage, the rewrite is triggered. Also
# you need to specify a minimal size for the AOF file to be rewritten, this
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 1gb
################################ LUA SCRIPTING  ###############################
# Max execution time of a Lua script in milliseconds.
#
# If the maximum execution time is reached Redis will log that a script is
# still in execution after the maximum allowed time and will start to
# reply to queries with an error.
#
# When a long running script exceeds the maximum execution time only the
# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
# used to stop a script that has not yet called write commands. The second
# is the only way to shut down the server in case a write command was
# already issued by the script but the user doesn't want to wait for the
# natural termination of the script.
#
# Set it to 0 or a negative value for unlimited execution without warnings.
#lua-time-limit 5000
################################ REDIS CLUSTER  ###############################
#
# Normal Redis instances can't be part of a Redis Cluster, only nodes that are
# started as cluster nodes can. In order to start a Redis instance as a
# cluster node enable the cluster support by uncommenting the following:
#
# cluster-enabled yes
# Every cluster node has a cluster configuration file. This file is not
# intended to be edited by hand. It is created and updated by Redis nodes.
# Every Redis Cluster node requires a different cluster configuration file.
# Make sure that instances running on the same system do not have
# overlapping cluster configuration file names.
#
# cluster-config-file nodes-6379.conf
# In order to set up your cluster make sure to read the documentation
# available at the http://redis.io web site.
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
# execution time. The execution time does not include the I/O operations
# like talking with the client, sending the reply and so forth,
# but just the time needed to actually execute the command (this is the only
# stage of command execution where the thread is blocked and can not serve
# other requests in the meantime).
#
# You can configure the slow log with two parameters: one tells Redis
# what is the execution time, in microseconds, to exceed in order for the
# command to get logged, and the other parameter is the length of the
# slow log. When a new command is logged the oldest one is removed from the
# queue of logged commands.
# The following time is expressed in microseconds, so 1000000 is equivalent
# to one second. Note that a negative number disables the slow log, while
# a value of zero forces the logging of every command.
slowlog-log-slower-than 10000
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 1024
vm-enabled no
############################### ADVANCED CONFIG ###############################
# Hashes are encoded in a special way (much more memory efficient) when they
# have at most a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure these limits with the following
# configuration directives.
hash-max-zipmap-entries 512
hash-max-zipmap-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# you are under the following limits:
list-max-ziplist-entries 512
list-max-ziplist-value 64
# Sets have a special encoding in just one case: when a set is composed
# of just strings that happen to be integers in radix 10 in the range
# of 64 bit signed integers.
# The following configuration setting sets the limit in the size of the
# set in order to use this special memory saving encoding.
set-max-intset-entries 512
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run into a hash table
# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# The default is to use this millisecond 10 times every second in order to
# actively rehash the main dictionaries, freeing memory when possible.
#
# If unsure:
# use "activerehashing no" if you have hard latency requirements and it is
# not a good thing in your environment that Redis can reply from time to time
# to queries with 2 milliseconds delay.
#
# use "activerehashing yes" if you don't have such hard requirements but
# want to free memory asap when possible.
activerehashing yes
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
# have a standard template that goes to all redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
# include /path/to/local.conf
# include /path/to/other.conf