Examples of all Splunk Configuration files (v9.1.0.2)
# Version 9.1.0.2
#
# This is an example alert_actions.conf. Use this file to configure alert
# actions for saved searches.
#
# To use one or more of these configurations, copy the configuration block into
# alert_actions.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[email]
# keep the search artifacts around for 24 hours
ttl = 86400
# if no @ is found in the address, the hostname of the current machine is appended
from = splunk
format = table
inline = false
sendresults = true
hostname = CanAccessFromTheWorld.com
command = sendemail "to=$action.email.to$" "server=$action.email.mailserver{default=localhost}$" "from=$action.email.from{default=splunk@localhost}$" "subject=$action.email.subject{recurse=yes}$" "format=$action.email.format{default=csv}$" "sssummary=Saved Search [$name$]: $counttype$($results.count$)" "sslink=$results.url$" "ssquery=$search$" "ssname=$name$" "inline=$action.email.inline{default=False}$" "sendresults=$action.email.sendresults{default=False}$" "sendpdf=$action.email.sendpdf{default=False}$" "pdfview=$action.email.pdfview$" "searchid=$search_id$" "graceful=$graceful{default=True}$" maxinputs="$maxinputs{default=1000}$" maxtime="$action.email.maxtime{default=5m}$"
use_tls = 1
sslVersions = tls1.2
sslVerifyServerCert = true
sslCommonNameToCheck = host1, host2
[rss]
# at most 30 items in the feed
items_count=30
# keep the search artifacts around for 24 hours
ttl = 86400
command = createrss "path=$name$.xml" "name=$name$" "link=$results.url$" "descr=Alert trigger: $name$, results.count=$results.count$ " "count=30" "graceful=$graceful{default=1}$" maxtime="$action.rss.maxtime{default=1m}$"
[summary_index]
# don't need the artifacts anytime after they're in the summary index
ttl = 120
# make sure the following keys are not added to marker (command, ttl, maxresults, _*)
command = summaryindex addtime=true index="$action.summary_index._name{required=yes}$" file="$name$_$#random$.stash" name="$name$" marker="$action.summary_index*{format=$KEY=\\\"$VAL\\\", key_regex="action.summary_index.(?!(?:command|maxresults|ttl|(?:_.*))$)(.*)"}$"
[summary_metric_index]
# don't need the artifacts anytime after they're in the summary index
ttl = 120
# make sure that "mcollect" is the SPL command and has the option "split=allnums"
command = mcollect index="$action.summary_index._name{required=yes}$" file="$name_hash$_$#random$.stash" name="$name$" marker="$action.summary_index*{format=$KEY=\\\"$VAL\\\", key_regex="action.summary_index.(?!(?:command|forceCsvResults|inline|maxresults|maxtime|python\\.version|ttl|track_alert|(?:_.*))$)(.*)"}$" split=allnums $action.summary_index._metric_dims$
[custom_action]
# flag the action as custom alert action
is_custom = 1
# configure appearance in the UI
label = Custom Alert Action
description = Triggers a custom alert action
icon_path = custom_alert.png
# override default script execution
# java.path is a path pointer file in <app>/bin pointing to the actual java executable
alert.execute.cmd = java.path
alert.execute.cmd.arg.1 = -jar
alert.execute.cmd.arg.2 = $SPLUNK_HOME/etc/apps/myapp/bin/custom.jar
alert.execute.cmd.arg.3 = --execute
# Version 9.1.0.2
#
# The following are example app.conf configurations. Configure properties for
# your custom application.
#
# There is NO DEFAULT app.conf.
#
# To use one or more of these configurations, copy the configuration block into
# app.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[launcher]
author=<author of app>
description=<textual description of app>
version=<version of app>
[triggers]
########## Conf-level reload triggers ##########
# Do not force a restart of Splunk Enterprise for state changes of MyApp
# Do not run special code to tell MyApp to reload myconffile.conf
# Apps with custom config files usually pick this option:
reload.myconffile = simple
# Do not force a restart of Splunk Enterprise for state changes of MyApp.
# Splunk Enterprise calls the /admin/myendpoint/_reload method in my custom
# EAI handler.
# Use this advanced option only if MyApp requires custom code to reload
# its configuration when its state changes
reload.myotherconffile = access_endpoints /admin/myendpoint
########## Stanza-level reload triggers ##########
# For any changed inputs.conf stanzas in the newly pushed cluster
# bundle that start with the "monitor" stanza prefix, e.g.
# [monitor://*], invoke the corresponding monitor input reload handler
# as specified, i.e. /data/inputs/monitor/_reload
#
# NOTE: The scripted input reload handler and the http input reload handler
# will NOT be invoked if the only changed inputs stanzas in the
# newly pushed cluster bundle are monitor inputs.
reload.inputs.monitor = access_endpoints /data/inputs/monitor
reload.inputs.script = access_endpoints /data/inputs/script
reload.inputs.http = access_endpoints /data/inputs/http
# Version 9.1.0.2
#
# This is an example audit.conf. Use this file to configure auditing.
#
# There is NO DEFAULT audit.conf.
#
# To use one or more of these configurations, copy the configuration block into
# audit.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Version 9.1.0.2
#
# This is an example authentication.conf. authentication.conf is used to
# configure LDAP, Scripted, SAML and Proxy SSO authentication in addition
# to Splunk's native authentication.
#
# To use one of these configurations, copy the configuration block into
# authentication.conf in $SPLUNK_HOME/etc/system/local/. You must reload
# auth in manager or restart Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
##### Use just Splunk's built-in authentication (default):
[authentication]
authType = Splunk
##### LDAP examples
#### Basic LDAP configuration example
[authentication]
authType = LDAP
authSettings = ldaphost
[ldaphost]
host = ldaphost.domain.com
port = 389
SSLEnabled = 0
bindDN = cn=Directory Manager
bindDNpassword = password
userBaseDN = ou=People,dc=splunk,dc=com
userBaseFilter = (objectclass=splunkusers)
groupBaseDN = ou=Groups,dc=splunk,dc=com
groupBaseFilter = (objectclass=splunkgroups)
userNameAttribute = uid
realNameAttribute = givenName
groupMappingAttribute = dn
groupMemberAttribute = uniqueMember
groupNameAttribute = cn
timelimit = 10
network_timeout = 15
# This stanza maps roles you have created in authorize.conf to LDAP Groups
[roleMap_ldaphost]
admin = SplunkAdmins
#### Example using the same server as 'ldaphost', but treating each user as
#### their own group
[authentication]
authType = LDAP
authSettings = ldaphost_usergroups
[ldaphost_usergroups]
host = ldaphost.domain.com
port = 389
SSLEnabled = 0
bindDN = cn=Directory Manager
bindDNpassword = password
userBaseDN = ou=People,dc=splunk,dc=com
userBaseFilter = (objectclass=splunkusers)
groupBaseDN = ou=People,dc=splunk,dc=com
groupBaseFilter = (objectclass=splunkusers)
userNameAttribute = uid
realNameAttribute = givenName
groupMappingAttribute = uid
groupMemberAttribute = uid
groupNameAttribute = uid
timelimit = 10
network_timeout = 15
[roleMap_ldaphost_usergroups]
admin = admin_user1;admin_user2;admin_user3;admin_user4
power = power_user1;power_user2
user = user1;user2;user3
#### Sample Configuration for Active Directory (AD)
[authentication]
authSettings = AD
authType = LDAP
[AD]
SSLEnabled = 1
bindDN = ldap_bind@splunksupport.kom
bindDNpassword = ldap_bind_user_password
groupBaseDN = CN=Groups,DC=splunksupport,DC=kom
groupBaseFilter =
groupMappingAttribute = dn
groupMemberAttribute = member
groupNameAttribute = cn
host = ADbogus.splunksupport.kom
port = 636
realNameAttribute = cn
userBaseDN = CN=Users,DC=splunksupport,DC=kom
userBaseFilter =
userNameAttribute = sAMAccountName
timelimit = 15
network_timeout = 20
anonymous_referrals = 0
[roleMap_AD]
admin = SplunkAdmins
power = SplunkPowerUsers
user = SplunkUsers
#### Sample Configuration for Sun LDAP Server
[authentication]
authSettings = SunLDAP
authType = LDAP
[SunLDAP]
SSLEnabled = 0
bindDN = cn=Directory Manager
bindDNpassword = Directory_Manager_Password
groupBaseDN = ou=Groups,dc=splunksupport,dc=com
groupBaseFilter =
groupMappingAttribute = dn
groupMemberAttribute = uniqueMember
groupNameAttribute = cn
host = ldapbogus.splunksupport.com
port = 389
realNameAttribute = givenName
userBaseDN = ou=People,dc=splunksupport,dc=com
userBaseFilter =
userNameAttribute = uid
timelimit = 5
network_timeout = 8
[roleMap_SunLDAP]
admin = SplunkAdmins
power = SplunkPowerUsers
user = SplunkUsers
#### Sample Configuration for OpenLDAP
[authentication]
authSettings = OpenLDAP
authType = LDAP
[OpenLDAP]
bindDN = uid=directory_bind,cn=users,dc=osx,dc=company,dc=com
bindDNpassword = directory_bind_account_password
groupBaseFilter =
groupNameAttribute = cn
SSLEnabled = 0
port = 389
userBaseDN = cn=users,dc=osx,dc=company,dc=com
host = hostname_OR_IP
userBaseFilter =
userNameAttribute = uid
groupMappingAttribute = uid
groupBaseDN = dc=osx,dc=company,dc=com
groupMemberAttribute = memberUid
realNameAttribute = cn
timelimit = 5
network_timeout = 8
dynamicGroupFilter = (objectclass=groupOfURLs)
dynamicMemberAttribute = memberURL
nestedGroups = 1
[roleMap_OpenLDAP]
admin = SplunkAdmins
power = SplunkPowerUsers
user = SplunkUsers
##### Scripted Auth examples
#### The following example is for RADIUS authentication:
[authentication]
authType = Scripted
authSettings = script
[script]
scriptPath = "$SPLUNK_HOME/bin/python" "$SPLUNK_HOME/share/splunk/authScriptSamples/radiusScripted.py"
# Cache results for 1 second per call
[cacheTiming]
userLoginTTL = 1
userInfoTTL = 1
#### The following example works with PAM authentication:
[authentication]
authType = Scripted
authSettings = script
[script]
scriptPath = "$SPLUNK_HOME/bin/python" "$SPLUNK_HOME/share/splunk/authScriptSamples/pamScripted.py"
# Cache results for different times per function
[cacheTiming]
userLoginTTL = 30s
userInfoTTL = 1min
##### SAML auth example
[authentication]
authSettings = samlv2
authType = SAML
[samlv2]
attributeQuerySoapPassword = changeme
attributeQuerySoapUsername = test
entityId = test-splunk
idpAttributeQueryUrl = https://exsso/idp/attrsvc.ssaml2
idpCertPath = /home/splunk/etc/auth/idp.crt
idpSSOUrl = https://exsso/idp/SSO.saml2
idpSLOUrl = https://exsso/idp/SLO.saml2
signAuthnRequest = true
signedAssertion = true
attributeQueryRequestSigned = true
attributeQueryResponseSigned = true
redirectPort = 9332
cipherSuite = TLSv1 MEDIUM:@STRENGTH
nameIdFormat = urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress
[roleMap_SAML]
admin = SplunkAdmins
power = SplunkPowerUsers
user = all
[userToRoleMap_SAML]
samluser = user::Saml Real Name::samluser@domain.com
[authenticationResponseAttrMap_SAML]
role = "http://schemas.microsoft.com/ws/2008/06/identity/claims/groups"
mail = "http://schemas.xmlsoap.org/ws/2005/05/identity/claims/emailaddress"
realName = "http://schemas.microsoft.com/identity/claims/displayname"
# Multifactor authentication example
[authentication]
externalTwoFactorAuthVendor = duo
externalTwoFactorAuthSettings = duo-mfa
# Duo specific authentication setting example
[duo-mfa]
apiHostname = api-xyz.duosecurity.com
appSecretKey = mustBeARandomStringOfSize40OrLonger
integrationKey = mustBeADuoProvidedStringOfSize20
secretKey = mustBeADuoProvidedStringOfSize40
enableMfaAuthRest = true
##### Proxy SSO auth example
[authentication]
authSettings = my_proxy
authType = ProxySSO
[my_proxy]
excludedUsers = user1,user2
excludedAutoMappedRoles = admin
defaultRoleIfMissing = user
[roleMap_proxySSO]
admin = group1;group2
user = group1;group3
[userToRoleMap_proxySSO]
proxy_user1 = user
proxy_user2 = power;can_delete
[splunk_auth]
minPasswordLength = 8
minPasswordUppercase = 1
minPasswordLowercase = 1
minPasswordSpecial = 1
minPasswordDigit = 0
expirePasswordDays = 90
expireAlertDays = 15
expireUserAccounts = true
forceWeakPasswordChange = false
lockoutUsers = true
lockoutAttempts = 5
lockoutThresholdMins = 5
lockoutMins = 30
enablePasswordHistory = false
passwordHistoryCount = 24
# Version 9.1.0.2
#
# This is an example authorize.conf. Use this file to configure roles and
# capabilities.
#
# To use one or more of these configurations, copy the configuration block
# into authorize.conf in $SPLUNK_HOME/etc/system/local/. You must reload
# auth or restart Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[role_ninja]
rtsearch = enabled
importRoles = user
srchFilter = host=foo
srchIndexesAllowed = *
srchIndexesDefault = mail;main
srchJobsQuota = 8
rtSrchJobsQuota = 8
srchDiskQuota = 500
srchTimeWin = 86400
srchTimeEarliest = 2592000
# This creates the role 'ninja', which inherits capabilities from the 'user'
# role. ninja has almost the same capabilities as power, except cannot
# schedule searches.
#
# The search filter limits ninja to searching on host=foo.
#
# ninja is allowed to search all public indexes (those that do not start
# with underscore), and will search the indexes mail and main if no index is
# specified in the search.
#
# ninja is allowed to run 8 search jobs and 8 real time search jobs
# concurrently (these counts are independent).
#
# ninja is allowed to take up 500 megabytes total on disk for all their jobs.
#
# ninja is allowed to run searches that span a maximum of one day
#
# ninja is allowed to run searches on data that is newer than 30 days ago
# Version 9.1.0.2
# Example: Monitoring console
# User is the administrator of 3 Splunk deployments: US Security, Global Security,
# and US Applications, and wants convenient access to the monitoring console
# for each.
[bookmarks_mc:US Security]
url = https://us-security.testcorp.example:8000/en-US/app/splunk_monitoring_console/monitoringconsole_overview
[bookmarks_mc:Global Security]
url = https://global-security.testcorp.example:8000/en-US/app/splunk_monitoring_console/monitoringconsole_overview
[bookmarks_mc:US Applications]
url = http://us-applications.testcorp.example:8000/en-US/app/splunk_monitoring_console/monitoringconsole_overview
# Version 9.1.0.2
#
# The following is an example collections.conf configuration.
#
# To use one or more of these configurations, copy the configuration block
# into collections.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Note this example uses a compound acceleration. Please check collections.conf.spec
# for restrictions on compound acceleration.
[mycollection]
field.foo = number
field.bar = string
accelerated_fields.myacceleration = {"foo": 1, "bar": -1}
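# Not part of the original example: to query this collection from SPL, the
# collection is typically exposed as a KV Store lookup in transforms.conf.
# A minimal sketch, assuming a hypothetical lookup name "mycollection_lookup":
#
#   [mycollection_lookup]            (in transforms.conf)
#   external_type = kvstore
#   collection = mycollection
#   fields_list = _key, foo, bar
#
# Example search using the lookup:
#   | inputlookup mycollection_lookup | where foo > 100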
# Version 9.1.0.2
#
# This is an example commands.conf. Use this file to configure settings
# for external search commands.
#
# To use one or more of these configurations, copy the configuration block
# into commands.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence)
# see the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Note: These are examples. Replace the values with your own
# customizations.
##############
# defaults for all external commands, exceptions are below in
# individual stanzas
# type of script: 'python', 'perl'
TYPE = python
# default "filename" would be <stanza-name>.py for python,
# <stanza-name>.pl for perl, and
# <stanza-name> otherwise
# is command streamable?
streaming = false
# maximum data that can be passed to command (0 = no limit)
maxinputs = 50000
# end defaults
#####################
[createrss]
filename = createrss.py
[diff]
filename = diff.py
[runshellscript]
filename = runshellscript.py
[sendemail]
filename = sendemail.py
[uniq]
filename = uniq.py
[windbag]
filename = windbag.py
supports_multivalues = true
[xmlkv]
filename = xmlkv.py
[xmlunescape]
filename = xmlunescape.py
# Version 9.1.0.2
#
# Configuration for example datamodels
#
# An example of accelerating data for the 'mymodel' datamodel for the
# past five days, generating and checking the column stores every 10 minutes
[mymodel]
acceleration = true
acceleration.earliest_time = -5d
acceleration.poll_buckets_until_maxtime = true
acceleration.cron_schedule = */10 * * * *
acceleration.hunk.compression_codec = snappy
acceleration.hunk.dfs_block_size = 134217728
acceleration.hunk.file_format = orc
# Version 9.1.0.2
#
# This file contains example patterns for the metadata files default.meta and
# local.meta
#
# This example would make all of the objects in an app globally accessible to
# all apps
[]
export=system
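# A minimal additional sketch (not part of the original example): restrict a
# single object type, such as views, instead of exporting the whole app.
# The stanza name and role names here are illustrative.
[views]
access = read : [ * ], write : [ admin, power ]
export = none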
# Version 9.1.0.2
#
# Example 1
# Deployment client receives apps and places them into the same
# repositoryLocation (locally, relative to $SPLUNK_HOME) as it picked them
# up from. This is typically $SPLUNK_HOME/etc/apps. There
# is nothing in [deployment-client] because the deployment client is not
# overriding the value set on the deployment server side.
[deployment-client]
[target-broker:deploymentServer]
targetUri= https://deploymentserver.splunk.mycompany.com:8089
# Example 2
# Deployment server keeps apps to be deployed in a non-standard location on
# the server side (perhaps for organization purposes).
# Deployment client receives apps and places them in the standard location.
# Note: Apps deployed to any location other than
# $SPLUNK_HOME/etc/apps on the deployment client side will
# not be recognized and run.
# This configuration rejects any location specified by the deployment server
# and replaces it with the standard client-side location.
[deployment-client]
serverRepositoryLocationPolicy = rejectAlways
repositoryLocation = $SPLUNK_HOME/etc/apps
[target-broker:deploymentServer]
targetUri= https://deploymentserver.splunk.mycompany.com:8089
# Example 3
# Deployment client should get apps from an HTTP server that is different
# from the one specified by the deployment server.
[deployment-client]
serverEndpointPolicy = rejectAlways
endpoint = http://apache.mycompany.server:8080/$serverClassName$/$appName$.tar
[target-broker:deploymentServer]
targetUri= https://deploymentserver.splunk.mycompany.com:8089
# Example 4
# Deployment client should get apps from a location on the file system and
# not from a location specified by the deployment server
[deployment-client]
serverEndpointPolicy = rejectAlways
endpoint = file:/<some_mount_point>/$serverClassName$/$appName$.tar
handshakeRetryIntervalInSecs=20
[target-broker:deploymentServer]
targetUri= https://deploymentserver.splunk.mycompany.com:8089
# Example 5
# Deployment client should phone home to the server for app updates more often
# Deployment client should only send back appEvents once a day
[deployment-client]
phoneHomeIntervalInSecs=30
appEventsResyncIntervalInSecs=86400
[target-broker:deploymentServer]
targetUri= https://deploymentserver.splunk.mycompany.com:8089
# Example 6
# Sets the deployment client connection/transaction timeouts to 1 minute.
# Deployment clients terminate connections if deployment server does not reply.
[deployment-client]
connect_timeout=60
send_timeout=60
recv_timeout=60
# Version 9.1.0.2
#
# These are example configurations for distsearch.conf. Use this file to
# configure distributed search. For all available attribute/value pairs, see
# distsearch.conf.spec.
#
# There is NO DEFAULT distsearch.conf.
#
# To use one or more of these configurations, copy the configuration block into
# distsearch.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[distributedSearch]
servers = https://192.168.1.1:8059,https://192.168.1.2:8059
# This entry distributes searches to 192.168.1.1:8059,192.168.1.2:8059.
# These machines will be contacted on port 8059 using https
# Attributes not set here will use the defaults listed in distsearch.conf.spec.
# this stanza controls the timing settings for connecting to a remote peer and
# the send timeout
[replicationSettings]
connectionTimeout = 10
sendRcvTimeout = 60
# this stanza controls which files are replicated to the other peers; each
# entry is a regex
[replicationAllowlist]
allConf = *.conf
# Mounted bundles example.
# This example shows two distsearch.conf configurations, one for the search
# head and another for each of the search head's search peers. It shows only
# the attributes necessary to implement mounted bundles.
# On a search head whose Splunk server name is "searcher01":
[replicationSettings]
...
replicationPolicy = mounted
# On each search peer:
[searchhead:searcher01]
mounted_bundles = true
bundles_location = /opt/shared_bundles/searcher01
# Version 9.1.0.2
# DO NOT EDIT THIS FILE!
# Please make all changes to files in $SPLUNK_HOME/etc/system/local.
# To make changes, copy the section/stanza you want to change from $SPLUNK_HOME/etc/system/default
# into ../local and edit there.
#
# This file contains mappings between Splunk eventtypes and event renderers.
#
# Beginning with version 6.0, Splunk Enterprise does not support the
# customization of event displays using event renderers.
#
[event_renderer_1]
eventtype = hawaiian_type
priority = 1
css_class = EventRenderer1
[event_renderer_2]
eventtype = french_food_type
priority = 1
template = event_renderer2.html
css_class = EventRenderer2
[event_renderer_3]
eventtype = japan_type
priority = 1
css_class = EventRenderer3
# Version 9.1.0.2
#
# This is an example eventdiscoverer.conf. These settings are used to control
# the discovery of common eventtypes used by the typelearner search command.
#
# To use one or more of these configurations, copy the configuration block into
# eventdiscoverer.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Terms in this list are never considered for defining an eventtype.
ignored_keywords = foo, bar, application, kate, charlie
# Fields in this list are never considered for defining an eventtype.
ignored_fields = pid, others, directory
# Version 9.1.0.2
#
# This file contains an example eventtypes.conf. Use this file to configure custom eventtypes.
#
# To use one or more of these configurations, copy the configuration block into eventtypes.conf
# in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see the documentation
# located at http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# The following example makes an eventtype called "error" based on the search "error OR fatal."
[error]
search = error OR fatal
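# Not part of the original example: once saved, an eventtype can be used as a
# search term. A minimal sketch (the field used for grouping is illustrative):
#   eventtype=error | stats count by host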
# The following example makes an eventtype template because it includes a field name
# surrounded by the percent character (in this case "%code%").
# The value of "%code%" is substituted into the event type name for that event.
# For example, if the following example event type is instantiated on an event that has a
# "code=432," it becomes "cisco-432".
[cisco-%code%]
search = cisco
# Version 9.1.0.2
#
# Here are some examples of stanzas in federated.conf
#
#
[provider://provider_1]
hostPort = remote_searchhead1:8090
password = secret1
serviceAccount = user1
type = splunk
appContext = search
useFSHKnowledgeObjects = 0
mode = standard
[provider://provider_2]
hostPort = remote_searchhead2:8090
password = secret2
serviceAccount = user2
type = splunk
appContext = search
useFSHKnowledgeObjects = 1
mode = transparent
# Version 9.1.0.2
#
# This file contains an example fields.conf. Use this file to configure
# dynamic field extractions.
#
# To use one or more of these configurations, copy the configuration block into
# fields.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# These tokenizers result in the values of To, From and Cc treated as a list,
# where each list element is an email address found in the raw string of data.
[To]
TOKENIZER = (\w[\w\.\-]*@[\w\.\-]*\w)
[From]
TOKENIZER = (\w[\w\.\-]*@[\w\.\-]*\w)
[Cc]
TOKENIZER = (\w[\w\.\-]*@[\w\.\-]*\w)
# Version 9.1.0.2
#
# The following are example global-banner.conf configurations. Configure properties for
# your custom application.
#
# To use one or more of these configurations, copy the configuration block into
# app.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[BANNER_MESSAGE_SINGLETON]
global_banner.visible = false
global_banner.message = Sample banner notification text. Please replace with your own message.
global_banner.background_color = blue
global_banner.hyperlink = https://www.splunk.com/
global_banner.hyperlink_text = Splunk
# Version 9.1.0.2
#
# This file contains an example health.conf. Use this file to configure thresholds
# for Splunk Enterprise's built in Health Report.
#
# To use one or more of these configurations, copy the configuration block
# into health.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
[health_reporter]
# Every 30 seconds a new 'PeriodicHealthReporter=INFO' log entry will be created.
full_health_log_interval = 30
# If an indicator's health status changes before 600 milliseconds elapses,
# the status change will be suppressed.
suppress_status_update_ms = 600
# Alerting for all features is enabled.
# You can disable alerting for each feature by setting 'alert.disabled' to 1.
alert.disabled = 0
# If you don't want to send alerts too frequently, you can define a minimum
# time period that must elapse before another alert is fired. Alerts triggered
# during the suppression period are sent after the period expires as a batch.
# The suppress_period value can be in seconds, minutes, hours, and days, and
# uses the format: 60s, 60m, 60h and 60d.
# Default is 10 minutes.
alert.suppress_period = 30m
[alert_action:email]
# Enable email alerts for the health report.
# Before you can send an email alert, you must configure the email notification
# settings on the email settings page.
# In the 'Search and Reporting' app home page, click Settings > Server settings
# > Email settings, and specify values for the settings.
# After you configure email settings, click Settings > Alert actions.
# Make sure that the 'Send email' option is enabled.
disabled = 0
# Define recipients when an email alert is triggered.
# You can define 'to', 'cc', and 'bcc' recipients.
# For multiple recipients in a list, separate email addresses with commas.
# If there is no recipient for a certain recipient type (e.g. bcc), leave the value blank.
action.to = admin_1@testcorp.example, admin_2@testcorp.example
action.cc = admin_3@testcorp.example, admin_4@testcorp.example
action.bcc =
[alert_action:pagerduty]
# Enable Pager Duty alerts for the health report.
# Before you can send an alert to PagerDuty, you must configure some settings
# on both the PagerDuty side and the Splunk Enterprise side.
# In PagerDuty, you must add a service to save your new integration.
# From the Integrations tab of the created service, copy the Integration Key
# string to the 'action.integration_url_override' below.
# On the Splunk side, you must install the PagerDuty Incidents app from
# Splunkbase.
# After you install the app, in Splunk Web, click Settings > Alert actions.
# Make sure that the PagerDuty app is enabled.
disabled = 0
action.integration_url_override = 123456789012345678901234567890ab
[alert_action:mobile]
# Enable Splunk Mobile alerts for the health report.
# You need to configure the 'alert_recipients' under this stanza in order to
# send health report alerts to the Splunk Mobile app on your phone.
#
# Steps to setup the health report mobile alert:
# * Download the Splunk Mobile App on your phone and open the app.
# * Download the Cloud Gateway App from Splunkbase to your splunk instance.
# * In Splunk Web, click Settings > Alert actions and make sure the Cloud
# Gateway App is enabled.
# * In Splunk Web, click Cloud Gateway App > Configure and enable Splunk
# Mobile.
# * In Splunk Web, click Cloud Gateway App > Register and copy the activation
# code displayed in the Splunk Mobile App to register your device(phone).
# * In health.conf configure 'alert_recipients' under the [alert_action:mobile]
# stanza, e.g. action.alert_recipients = admin
#
# For details on how to install and use the Cloud Gateway App, please refer to
# https://docs.splunk.com/Documentation/Gateway
disabled = 0
action.alert_recipients = admin
[alert_action:victorops]
# Enable VictorOps alerts for the health report.
# Before you can send an alert to VictorOps, you must configure some settings
# on both the VictorOps side and the Splunk Enterprise side.
# In VictorOps, you must create an API key and can optionally create a routing key.
# On the Splunk side, you must install the VictorOps App from Splunkbase.
# After you install the app, in Splunk Web, click Settings > Alert actions.
# Make sure that the VictorOps app is enabled and the API key is properly configured.
disabled = 0
# alert message type in VictorOps.
# Valid alert message types in VictorOps:
# * CRITICAL - Triggers an incident.
# * WARNING - May trigger an incident, depending on your settings in VictorOps.
# * ACKNOWLEDGEMENT - Acknowledges an incident. This value is unlikely to be useful.
# * INFO - Creates a timeline event, but does not trigger an incident.
# * RECOVERY - Resolves an incident. This value is unlikely to be useful.
action.message_type = CRITICAL
# ID of the incident in VictorOps.
# Optional.
action.entity_id =
# Use this field to choose one of the API keys configured in passwords.conf
# under victorops_app.
# Leave this field empty if you want to use the default API key.
# Optional.
action.record_id =
# Use this field to overwrite the default routing key.
# Optional.
action.routing_key_override =
[clustering]
# The clustering health report runs every 20 seconds.
health_report_period = 20
# Enable the clustering feature health check.
disabled = 0
[feature:s2s_autolb]
# If more than 20% of forwarding destinations have failed, health status changes to yellow.
indicator:s2s_connections:yellow = 20
# If more than 70% of forwarding destinations have failed, health status changes to red.
indicator:s2s_connections:red = 70
# Alerting for all indicators is disabled.
alert.disabled = 1
[feature:batchreader]
# Enable alerts for feature:batchreader. If there is no 'alert.disabled' value
# specified in a feature stanza, then the alert is enabled for the feature by
# default.
# You can also enable/disable alerts at the indicator level, using the setting:
# 'alert:<indicator name>.disabled'.
alert.disabled = 0
# You can define which color triggers an alert.
# If the value is yellow, both yellow and red trigger an alert.
# If the value is red, only red triggers an alert.
# Default value is red.
# You can also define the threshold_color for each indicator using the setting:
# 'alert:<indicator name>.threshold_color'.
# Indicator level setting overrides the feature level threshold_color setting.
alert.threshold_color = red
# You can define the duration that an unhealthy status persists before the alert fires.
# Default value is 60 seconds.
# You can also define the min_duration_sec for each indicator using the setting:
# 'alert:<indicator name>.min_duration_sec'.
# Indicator level setting overrides feature level min_duration_sec setting.
alert.min_duration_sec = 30
# Suppresses color changes for this feature until March 25, 2021 8:00:00 PM GMT.
snooze_end_time = 1616702400
# Version 9.1.0.2
#
# This file contains an example indexes.conf. Use this file to configure
# indexing properties.
#
# To use one or more of these configurations, copy the configuration block
# into indexes.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# The following example defines a new high-volume index, called "hatch", and
# sets this to be the default index for both incoming data and search.
#
# Note that you may want to adjust the indexes that your roles have access
# to when creating indexes (in authorize.conf)
defaultDatabase = hatch
[hatch]
homePath = $SPLUNK_DB/hatchdb/db
coldPath = $SPLUNK_DB/hatchdb/colddb
thawedPath = $SPLUNK_DB/hatchdb/thaweddb
maxDataSize = 10000
maxHotBuckets = 10
# The following example changes the default amount of space used on a
# per-index basis.
[default]
maxTotalDataSizeMB = 650000
maxGlobalRawDataSizeMB = 0
maxGlobalDataSizeMB = 0
# The following example changes the time data is kept around by default.
# It also sets an export script. NOTE: You must edit this script to set
# export location before running it.
[default]
maxWarmDBCount = 200
frozenTimePeriodInSecs = 432000
rotatePeriodInSecs = 30
coldToFrozenScript = "$SPLUNK_HOME/bin/python" "$SPLUNK_HOME/bin/myColdToFrozenScript.py"
# This example freezes buckets on the same schedule, but lets Splunk do the
# freezing process as opposed to a script
[default]
maxWarmDBCount = 200
frozenTimePeriodInSecs = 432000
rotatePeriodInSecs = 30
coldToFrozenDir = "$SPLUNK_HOME/myfrozenarchive"
### This example demonstrates the use of volumes ###
# volume definitions; prefixed with "volume:"
[volume:hot1]
path = /mnt/fast_disk
maxVolumeDataSizeMB = 100000
[volume:cold1]
path = /mnt/big_disk
# maxVolumeDataSizeMB not specified: no data size limitation on top of the
# existing ones
[volume:cold2]
path = /mnt/big_disk2
maxVolumeDataSizeMB = 1000000
# index definitions
[idx1]
homePath = volume:hot1/idx1
coldPath = volume:cold1/idx1
# thawedPath must be specified, and cannot use volume: syntax
# choose a location convenient for reconstitution from archive goals
# For many sites, this may never be used.
thawedPath = $SPLUNK_DB/idx1/thaweddb
[idx2]
# note that the specific indexes must take care to avoid collisions
homePath = volume:hot1/idx2
coldPath = volume:cold2/idx2
thawedPath = $SPLUNK_DB/idx2/thaweddb
[idx3]
homePath = volume:hot1/idx3
coldPath = volume:cold2/idx3
thawedPath = $SPLUNK_DB/idx3/thaweddb
[idx4]
datatype = metric
homePath = volume:hot1/idx4
coldPath = volume:cold2/idx4
thawedPath = $SPLUNK_DB/idx4/thaweddb
metric.maxHotBuckets = 6
metric.splitByIndexKeys = metric_name
### Indexes may be allocated space in effective groups by sharing volumes ###
# perhaps we only want to keep 100GB of summary data and other
# low-volume information
[volume:small_indexes]
path = /mnt/splunk_indexes
maxVolumeDataSizeMB = 100000
# and this is our main event series, allowing 50 terabytes
[volume:large_indexes]
path = /mnt/splunk_indexes
maxVolumeDataSizeMB = 50000000
# summary and rare_data together will be limited to 100GB
[summary]
homePath=volume:small_indexes/summary/db
coldPath=volume:small_indexes/summary/colddb
thawedPath=$SPLUNK_DB/summary/thaweddb
# low-volume indexes probably don't want a lot of hot buckets
maxHotBuckets = 2
# if the volume is quite low, and you have data sunset goals you may
# want to have smaller buckets
maxDataSize = 500
[rare_data]
homePath=volume:small_indexes/rare_data/db
coldPath=volume:small_indexes/rare_data/colddb
thawedPath=$SPLUNK_DB/rare_data/thaweddb
maxHotBuckets = 2
# main, and any other large volume indexes you add sharing large_indexes
# will together be constrained to 50TB, separately from the 100GB of
# the small_indexes
[main]
homePath=volume:large_indexes/main/db
coldPath=volume:large_indexes/main/colddb
thawedPath=$SPLUNK_DB/main/thaweddb
# large buckets and more hot buckets are desirable for higher volume
# indexes, and ones where the variations in the timestream of events is
# hard to predict.
maxDataSize = auto_high_volume
maxHotBuckets = 10
# Allow the main index up to 8TB of the 50TB volume limit.
homePath.maxDataSizeMB = 8000000
[idx1_large_vol]
homePath=volume:large_indexes/idx1_large_vol/db
coldPath=volume:large_indexes/idx1_large_vol/colddb
thawedPath=$SPLUNK_DB/idx1_large/thaweddb
# this index will exceed the default of .5TB requiring a change to maxTotalDataSizeMB
maxTotalDataSizeMB = 750000
maxDataSize = auto_high_volume
maxHotBuckets = 10
# but the data will only be retained for about 30 days
frozenTimePeriodInSecs = 2592000
### This example demonstrates database size constraining ###
# In this example per-database constraint is combined with volumes. While a
# central volume setting makes it easy to manage data size across multiple
# indexes, there is a concern that bursts of data in one index may
# significantly displace data from others. The homePath.maxDataSizeMB setting
# can be used to assure that no index will ever take more than certain size,
# therefore alleviating the concern.
# global settings
# will be inherited by all indexes: no database will exceed 1TB
homePath.maxDataSizeMB = 1000000
# volumes
[volume:caliente]
path = /mnt/fast_disk
maxVolumeDataSizeMB = 100000
[volume:frio]
path = /mnt/big_disk
maxVolumeDataSizeMB = 1000000
# and this is our main event series, allowing about 50 terabytes
[volume:large_indexes]
path = /mnt/splunk_indexes
maxVolumeDataSizeMB = 50000000
# indexes
[i1]
homePath = volume:caliente/i1
# homePath.maxDataSizeMB is inherited
coldPath = volume:frio/i1
# coldPath.maxDataSizeMB not specified: no limit - old-style behavior
thawedPath = $SPLUNK_DB/i1/thaweddb
[i2]
homePath = volume:caliente/i2
# overrides the default maxDataSize
homePath.maxDataSizeMB = 1000
coldPath = volume:frio/i2
# limits the cold DB's
coldPath.maxDataSizeMB = 10000
thawedPath = $SPLUNK_DB/i2/thaweddb
[i3]
homePath = /old/style/path
homePath.maxDataSizeMB = 1000
coldPath = volume:frio/i3
coldPath.maxDataSizeMB = 10000
thawedPath = $SPLUNK_DB/i3/thaweddb
# main, and any other large volume indexes you add sharing large_indexes
# will together be constrained to 50TB, separately from the rest of
# the indexes
[main]
homePath=volume:large_indexes/main/db
coldPath=volume:large_indexes/main/colddb
thawedPath=$SPLUNK_DB/main/thaweddb
# large buckets and more hot buckets are desirable for higher volume indexes
maxDataSize = auto_high_volume
maxHotBuckets = 10
# Allow main index to override global and use 8TB of the 50TB volume limit.
homePath.maxDataSizeMB = 8000000
### This example demonstrates how to configure a volume that points to
### S3-based remote storage and indexes that use this volume. The setting
### "storageType=remote" indicates that this is a remote-storage volume.
### The "remotePath" parameter associates the index with that volume
### and configures a top-level location for uploading buckets.
[volume:s3]
storageType = remote
path = s3://remote_volume
remote.s3.bucket_name = example-s3-bucket
remote.s3.access_key = S3_ACCESS_KEY
remote.s3.secret_key = S3_SECRET_KEY
[default]
remotePath = volume:s3/$_index_name
[i4]
coldPath = $SPLUNK_DB/$_index_name/colddb
homePath = $SPLUNK_DB/$_index_name/db
thawedPath = $SPLUNK_DB/$_index_name/thaweddb
[i5]
coldPath = $SPLUNK_DB/$_index_name/colddb
homePath = $SPLUNK_DB/$_index_name/db
thawedPath = $SPLUNK_DB/$_index_name/thaweddb
### This example demonstrates how to configure a volume that points to
### GCS-based remote storage.
### "storageType=remote" indicates that this is a remote-storage volume.
### The "remotePath" parameter associates the index with that volume
### and configures a top-level location for uploading buckets.
[volume:gs]
storageType = remote
path = gs://test-bucket/some/path
remote.gs.credential_file = credentials.json
[default]
remotePath = volume:gs/$_index_name
[i6]
coldPath = $SPLUNK_DB/$_index_name/colddb
homePath = $SPLUNK_DB/$_index_name/db
thawedPath = $SPLUNK_DB/$_index_name/thaweddb
# Version 9.1.0.2
#
# This is an example inputs.conf. Use this file to configure data inputs.
#
# To use one or more of these configurations, copy the configuration block into
# inputs.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# The following configuration reads all the files in the directory /var/log.
[monitor:///var/log]
# The following configuration reads all the files under /var/log/httpd and
# classifies them as sourcetype::access_common.
#
# When checking a file for new data, if the file's modification time is from
# before seven days ago, the file will no longer be checked for changes
# until you restart the software.
[monitor:///var/log/httpd]
sourcetype = access_common
ignoreOlderThan = 7d
# The following configuration reads all the
# files under /mnt/logs. When the path is /mnt/logs/<host>/... it
# sets the hostname (by file) to <host>.
[monitor:///mnt/logs]
host_segment = 3
# The following configuration listens on TCP port 9997 for raw
# data from ANY remote server (not just a Splunk instance). The host of the
# data is set to the IP address of the remote server.
[tcp://:9997]
# The following configuration listens on TCP port 9995 for raw
# data from ANY remote server. The host of the data is set as the host name of
# the remote server. All data will also be assigned the sourcetype "log4j" and
# the source "tcp:9995".
[tcp://:9995]
connection_host = dns
sourcetype = log4j
source = tcp:9995
# The following configuration listens on TCP port 9995 for raw
# data from 192.0.2.10.
# All data is assigned the host "webhead-1", the sourcetype "access_common" and
# the source "//192.0.2.10/var/log/apache/access.log".
[tcp://192.0.2.10:9995]
host = webhead-1
sourcetype = access_common
source = //192.0.2.10/var/log/apache/access.log
# The following configuration listens on TCP port 9996 for
# Splunk cooked event data from ANY splunk forwarder.
# The host of the data is set to the host name of the remote server ONLY IF the
# remote data has no host set, or if it is set to "localhost".
[splunktcp://:9996]
connection_host = dns
# The following configuration listens on TCP port 9996 for
# distributed search data from 192.0.2.100. The data is processed the same as
# locally indexed data.
[splunktcp://192.0.2.100:9996]
# The following configuration listens on TCP port 514 for data
# from syslog.corp.example.net. The data is assigned the sourcetype "syslog"
# and the host is set to the host name of the remote server.
[tcp://syslog.corp.example.net:514]
sourcetype = syslog
connection_host = dns
# The following configuration limits the acceptance of data to forwarders
# that have been configured with the token value specified in the 'token' field.
# NOTE: The token value is encrypted. The REST endpoint encrypts the token
# while saving it.
[splunktcptoken://tok1]
token = $7$ifQTPTzHD/BA8VgKvVcgO1KQAtr3N1C8S/1uK3nAKIE9dd9e9g==
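# Not part of the original example: the forwarding side supplies the same token
# in outputs.conf. A minimal sketch (the group name, host, port, and token value
# are placeholders):
#
#   [tcpout:group1]                  (in outputs.conf on the forwarder)
#   server = receiver.example.com:9996
#   token = <the plaintext token configured above>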
# Set up Secure Sockets Layer (SSL):
[SSL]
serverCert=$SPLUNK_HOME/etc/auth/server.pem
password=password
requireClientCert=false
[splunktcp-ssl:9996]
# Use file system change monitor:
[fschange:/etc/]
fullEvent=true
pollPeriod=60
recurse=true
sendEventMaxSize=100000
index=main
# Monitor the Security Windows Event Log channel, getting the most recent
# events first, then older, and finally continuing to gather newly arriving events
[WinEventLog://Security]
disabled = 0
start_from = newest
evt_dc_name =
evt_dns_name =
evt_resolve_ad_ds =
evt_resolve_ad_obj = 1
checkpointInterval = 5
# Monitor the ForwardedEvents Windows Event Log channel, only gathering the
# events that arrive after monitoring starts, going forward in time.
[WinEventLog://ForwardedEvents]
disabled = 0
start_from = oldest
current_only = 1
batch_size = 10
checkpointInterval = 5
[tcp://9994]
queueSize=50KB
persistentQueueSize=100MB
# Perfmon: Windows performance monitoring examples
# You must specify the names of objects, counters and instances
# exactly as they are shown in the Performance Monitor application. Splunk Web
# is the recommended interface to use to configure performance monitor inputs.
# These stanzas gather performance data from the local system only.
# Use wmi.conf for performance monitor metrics on remote systems.
# Query the PhysicalDisk performance object and gather disk access data for
# all physical drives installed in the system. Store this data in the
# "perfmon" index.
[perfmon://LocalPhysicalDisk]
interval = 10
object = PhysicalDisk
counters = Disk Bytes/sec; % Disk Read Time; % Disk Write Time; % Disk Time
instances = *
disabled = 0
index = perfmon
# Gather common memory statistics using the Memory performance object, every
# 5 seconds. Store the data in the "main" index. Since none of the counters
# specified have applicable instances, the instances attribute is not required.
[perfmon://LocalMainMemory]
interval = 5
object = Memory
counters = Committed Bytes; Available Bytes; % Committed Bytes In Use
disabled = 0
index = main
# Gather data on USB activity levels every 10 seconds. Store this data in the
# default index.
[perfmon://USBChanges]
interval = 10
object = USB
counters = Usb Control Data Bytes/Sec
instances = *
disabled = 0
# Admon: Windows Active Directory monitoring examples
# Monitor the default domain controller (DC) for the domain that the computer
# running Splunk belongs to. Start monitoring at the root node of Active
# Directory.
[admon://NearestDC]
targetDc =
startingNode =
# Monitor a specific DC, with a specific starting node. Store the events in
# the "admon" Splunk index. Do not print Active Directory schema. Do not
# index baseline events.
[admon://DefaultTargetDC]
targetDc = pri01.eng.ad.splunk.com
startingNode = OU=Computers,DC=eng,DC=ad,DC=splunk,DC=com
index = admon
printSchema = 0
baseline = 0
# Monitor two different DCs with different starting nodes.
[admon://DefaultTargetDC]
targetDc = pri01.eng.ad.splunk.com
startingNode = OU=Computers,DC=eng,DC=ad,DC=splunk,DC=com
[admon://SecondTargetDC]
targetDc = pri02.eng.ad.splunk.com
startingNode = OU=Computers,DC=hr,DC=ad,DC=splunk,DC=com
# logD
[logd://example]
logd-backtrace = false
logd-debug = false
logd-info = false
logd-loss = false
logd-signpost = false
logd-predicate = 'subsystem == "com.apple.TimeMachine" && eventMessage CONTAINS[c] "backup"'
logd-process = 220,221,223
logd-source = false
logd-include-fields = PRIORITY,CMD,EXE
logd-exclude-fields = bootUUID,formatString
logd-interval = 60
logd-starttime = "2015-01-10 17:15:00"
#journald
[journald://example]
journalctl-include-fields = MESSAGE
journalctl-exclude-fields = _UID,_MACHINE_ID,_GID,_COMM,_EXE
journalctl-filter = _SYSTEMD_UNIT=avahi-daemon.service _PID=28097 + _SYSTEMD_UNIT=dbus.service
journalctl-unit = systemd-modules-load.service
journalctl-identifier = SYSLOG_IDENTIFIER
journalctl-priority = 0
journalctl-boot = 2
journalctl-facility = help
journalctl-grep =^WARN.*disk,.*errno=\d+\S+restarting
journalctl-user-unit = SERVICENAME
journalctl-dmesg = true
journalctl-quiet = true
# Version 9.1.0.2
#
# This file contains an example SPLUNK_HOME/etc/instance.cfg file; the
# instance.cfg file is not to be modified or removed by user. LEAVE THE
# instance.cfg FILE ALONE.
#
[general]
guid = 12345678-abcd-abcd-abcd-12345678900
# Version 9.1.0.2
# CAUTION: Do not alter the settings in limits.conf unless you know what you are doing.
# Improperly configured limits may result in splunkd crashes and/or memory overuse.
[searchresults]
maxresultrows = 50000
# maximum number of times to try in the atomic write operation (1 = no retries)
tocsv_maxretry = 5
# retry period is 1/2 second (500 milliseconds)
tocsv_retryperiod_ms = 500
[subsearch]
# maximum number of results to return from a subsearch
maxout = 100
# maximum number of seconds to run a subsearch before finalizing
maxtime = 10
# time to cache a given subsearch's results
ttl = 300
[anomalousvalue]
maxresultrows = 50000
# maximum number of distinct values for a field
maxvalues = 100000
# maximum size in bytes of any single value (truncated to this size if larger)
maxvaluesize = 1000
[associate]
maxfields = 10000
maxvalues = 10000
maxvaluesize = 1000
# for the contingency, ctable, and counttable commands
[ctable]
maxvalues = 1000
[correlate]
maxfields = 1000
# for bin/bucket/discretize
[discretize]
maxbins = 50000
# if maxbins not specified or = 0, defaults to searchresults::maxresultrows
[inputcsv]
# maximum number of retries for creating a tmp directory (with random name in
# SPLUNK_HOME/var/run/splunk)
mkdir_max_retries = 100
[kmeans]
maxdatapoints = 100000000
[kv]
# when non-zero, the point at which kv should stop creating new columns
maxcols = 512
[rare]
maxresultrows = 50000
# maximum distinct value vectors to keep track of
maxvalues = 100000
maxvaluesize = 1000
[restapi]
# maximum result rows to be returned by /events or /results getters from REST
# API
maxresultrows = 50000
[search]
# how long searches should be stored on disk once completed
ttl = 86400
# the approximate maximum number of timeline buckets to maintain
status_buckets = 300
# the last accessible event in a call that takes a base and bounds
max_count = 10000
# the minimum length of a prefix before a * to ask the index about
min_prefix_len = 1
# the length of time to persist search cache entries (in seconds)
cache_ttl = 300
# By default, we will not retry searches in the event of indexer
# failures with indexer clustering enabled.
# Hence, the default value for search_retry here is false.
search_retry = false
# Timeout value for checking search marker files like hotbucketmarker or backfill
# marker.
check_search_marker_done_interval = 60
# Time interval of sleeping between subsequent search marker files checks.
check_search_marker_sleep_interval = 1
# The total number of concurrent searches is set to 100 manually.
total_search_concurrency_limit = 100
# If the number of CPUs in your machine is 14, then the total system-wide limit of
# concurrent historical searches on this machine is 20, which is
# max_searches_per_cpu x number_of_cpus + base_max_searches = 1 x 14 + 6 = 20.
max_searches_per_cpu = 1
base_max_searches = 6
# Whether maximum number of concurrent searches are enforced cluster-wide
# for admission of adhoc searches
shc_adhoc_quota_enforcement = on
# Enable throttling on both CPU and memory
remote_search_requests_throttling_type = per_cpu, physical_ram
# If the peer node has 48 cores, the following setting allows a maximum of 720
# concurrent searches (48 cores x 15 per core = 720).
[search_throttling::per_cpu]
max_concurrent = 15
# If the peer has 64 GB of RAM, the following setting allows a maximum of 512
# concurrent searches (134217728 bytes = 128 MB per search; 64 GB / 128 MB = 512).
[search_throttling::physical_ram]
min_memory_per_search = 134217728
[scheduler]
# Percent of total concurrent searches that will be used by scheduler is
# total concurrency x max_searches_perc = 20 x 60% = 12 scheduled searches
# User default value (needed only if different from system/default value) when
# no max_searches_perc.<n>.when (if any) below matches.
max_searches_perc = 60
# Increase the value between midnight-5AM.
max_searches_perc.0 = 75
max_searches_perc.0.when = * 0-5 * * *
# More specifically, increase it even more on weekends.
max_searches_perc.1 = 85
max_searches_perc.1.when = * 0-5 * * 0,6
# Maximum number of concurrent searches is enforced cluster-wide by the
# captain for scheduled searches. For a 3 node SHC total concurrent
# searches = 3 x 20 = 60. If the total searches (adhoc + scheduled) reach 60,
# no more scheduled searches can start until some slots are free.
shc_syswide_quota_enforcement = true
[slc]
# maximum number of clusters to create
maxclusters = 10000
[findkeywords]
# events to use in findkeywords command (and patterns UI)
maxevents = 50000
[stats]
maxresultrows = 50000
maxvalues = 10000
maxvaluesize = 1000
[top]
maxresultrows = 50000
# maximum distinct value vectors to keep track of
maxvalues = 100000
maxvaluesize = 1000
[search_optimization]
enabled = true
[search_optimization::predicate_split]
enabled = true
[search_optimization::predicate_push]
enabled = true
[search_optimization::predicate_merge]
enabled = true
inputlookup_merge = true
merge_to_base_search = true
[search_optimization::projection_elimination]
enabled = true
cmds_black_list = eval, rename
[search_optimization::search_flip_normalization]
enabled = true
[search_optimization::reverse_calculated_fields]
enabled = true
[search_optimization::search_sort_normalization]
enabled = true
[search_optimization::replace_table_with_fields]
enabled = true
[search_optimization::replace_stats_cmds_with_tstats]
enabled = true
detect_search_time_field_collisions = true
[search_optimization::replace_datamodel_stats_cmds_with_tstats]
enabled = true
# Version 9.1.0.2
#
# This file and all forms of literals.conf are now deprecated.
# Instead, use the messages.conf file which is documented
# at "Customize Splunk Web messages" in the Splunk documentation.
# Version 9.1.0.2
#
# Example macros.conf
#
# macro foobar that takes no arguments can be invoked via `foobar`
[foobar]
# the definition of a macro can invoke another macro. Nesting can be indefinite
# and cycles will be detected and result in an error
definition = `foobar(foo=defaultfoo)`
# macro foobar that takes one argument, invoked via `foobar(someval)`
[foobar(1)]
args = foo
# note this definition will include the leading and trailing quotes, i.e.
# something `foobar(someval)`
# would expand to
# something "foo = someval"
definition = "foo = $foo$"
# macro that takes two arguments
# note that macro arguments can be named so this particular macro could be
# invoked equivalently as `foobar(1,2)` `foobar(foo=1,bar=2)` or
# `foobar(bar=2,foo=1)`
[foobar(2)]
args = foo, bar
definition = "foo = $foo$, bar = $bar$"
# macro that takes one argument that does validation
[foovalid(1)]
args = foo
definition = "foovalid = $foo$"
# the validation eval function takes any even number of arguments (>=2) where
# the first argument is a boolean expression, the 2nd a string, the third
# boolean, the 4th a string, and so on
validation = validate(foo>15,"foo must be greater than 15",foo<=100,"foo must be <= 100")
# macro showing simple boolean validation, where if foo > bar is not true,
# errormsg is displayed
[foovalid(2)]
args = foo, bar
definition = "foo = $foo$ and bar = $bar$"
validation = foo > bar
errormsg = foo must be greater than bar
# example of an eval-based definition. For example in this case
# `fooeval(10,20)` would get replaced by 10 + 20
[fooeval(2)]
args = foo, bar
definition = if (bar > 0, "$foo$ + $bar$", "$foo$ - $bar$")
iseval = true
# Version 9.1.0.2
#
# This file contains an example messages.conf of attribute/value pairs for
# configuring externalized strings.
#
# There is a messages.conf in $SPLUNK_HOME/etc/system/default/. To set custom
# configurations, place a messages.conf in $SPLUNK_HOME/etc/system/local/. You
# must restart the instance to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# For the full list of all literals that can be overridden, check out
# $SPLUNK_HOME/etc/system/default/messages.conf
[DISK_MON]
name = Disk Monitor
[DISK_MON:INSUFFICIENT_DISK_SPACE_ERROR__S_S_LLU]
message = Cannot write data to index path '%s' because you are low on disk space on partition '%s'. Indexing has been paused.
action = Free disk space above %lluMB to resume indexing.
severity = warn
capabilities = indexes_edit
help = learnmore.indexer.setlimits
[LM_LICENSE]
name = License Manager
[LM_LICENSE:EXPIRED_STATUS__LD]
message = Your license has expired as of $t%ld.
action = $CONTACT_SPLUNK_SALES_TEXT$
capabilities = license_edit
[LM_LICENSE:EXPIRING_STATUS__LD]
message = Your license will soon expire on $t%ld.
action = $CONTACT_SPLUNK_SALES_TEXT$
capabilities = license_edit
[LM_LICENSE:INDEXING_LIMIT_EXCEEDED]
message = Daily indexing volume limit exceeded today.
action = See [[/manager/search/licenseusage|License Manager]] for details.
severity = warn
capabilities = license_view_warnings
help = learnmore.license.features
[LM_LICENSE:MASTER_CONNECTION_ERROR__S_LD_LD]
message = Failed to contact license master: reason='%s', first failure time=%ld ($t%ld).
severity = warn
capabilities = license_edit
help = learnmore.license.features
[LM_LICENSE:SLAVE_WARNING__LD_S]
message = License warning issued within past 24 hours: $t%ld.
action = Please refer to the License Usage Report view on license master '%s' to find out more.
severity = warn
capabilities = license_edit
help = learnmore.license.features
# Version 9.1.0.2
#
# This file contains example metric alerts.
#
# To use one or more of these configurations, copy the configuration block into
# metric_alerts.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# The following searches are example searches. To create your own search,
# modify the values by following the spec outlined in metric_alerts.conf.spec.
[alert1]
groupby = host, app
filter = region=east
condition = 'avg(mem.used)' > 50
action.email = 1
action.email.to = nonexist@abc.xyz
[alert2]
groupby = host, app
filter = region=east
condition = 'max(cpu.util)' > 80
action.email = 1
action.email.to = nonexist@abc.xyz
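# For illustration only (a hypothetical equivalent, assuming the metrics are
# stored in an index named "my_metrics"): the condition in [alert1] corresponds
# roughly to an mstats search such as
#
#   | mstats avg(mem.used) AS avg_mem WHERE index=my_metrics AND region=east BY host, app
#   | where avg_mem > 50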
# Version 9.1.0.2
#
# This file contains example saved searches and alerts.
#
# To use one or more of these configurations, copy the configuration block into
# metric_rollups.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# The following searches are example searches. To create your own search,
# modify the values by following the spec outlined in metric_rollups.conf.spec.
[index:mySourceMetricIndex]
# defaultAggregation is applied to all the measures/metric names unless overridden
defaultAggregation = avg
# Override metric_name_1 aggregation from avg to min
aggregation.metric_name_1 = min
# Override metric_name_2 aggregation from avg to count
aggregation.metric_name_2 = count
# Exclude dimension_1 and dimension_2 during rollup
dimensionList = dimension_1, dimension_2
dimensionListType = excluded
# All of the above settings apply globally to all of the summary definitions below
# Each summary here specifies the target index and span
# Two summaries are defined; each summary must be defined as rollup.<0, 1, 2..>...
rollup.0.rollupIndex = myTargetMetricIndex_0
rollup.0.span = 1h
rollup.1.rollupIndex = myTargetMetricIndex_1
rollup.1.span = 1d
# Exclude metric_1 and metric_2 during rollup
metricList = metric_1, metric_2
metricListType = excluded
# Version 9.1.0.2
#
# This file contains example multi key/value extraction configurations.
#
# To use one or more of these configurations, copy the configuration block into
# multikv.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# This example breaks up the output from top:
# Sample output:
# Processes: 56 total, 2 running, 54 sleeping... 221 threads 10:14:07
#.....
#
# PID COMMAND %CPU TIME #TH #PRTS #MREGS RPRVT RSHRD RSIZE VSIZE
# 29960 mdimport 0.0% 0:00.29 3 60 50 1.10M 2.55M 3.54M 38.7M
# 29905 pickup 0.0% 0:00.01 1 16 17 164K 832K 764K 26.7M
#....
[top_mkv]
# pre table starts at "Process..." and ends at line containing "PID"
pre.start = "Process"
pre.end = "PID"
pre.ignore = _all_
# specify table header location and processing
header.start = "PID"
header.linecount = 1
header.replace = "%" = "_", "#" = "_"
header.tokens = _tokenize_, -1," "
# table body ends at the next "Process" line (i.e., the start of another top
# invocation); tokenize and inherit the number of tokens from the previous section (header)
body.end = "Process"
body.tokens = _tokenize_, 0, " "
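# For illustration only (not part of the shipped example): once this stanza is
# in place, a search can apply it to raw "top" output with the multikv command,
# for example
#
#   ... | multikv conf=top_mkv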
## This example handles the output of 'ls -lah' command:
#
# total 2150528
# drwxr-xr-x 88 john john 2K Jan 30 07:56 .
# drwxr-xr-x 15 john john 510B Jan 30 07:49 ..
# -rw------- 1 john john 2K Jan 28 11:25 .hiden_file
# drwxr-xr-x 20 john john 680B Jan 30 07:49 my_dir
# -r--r--r-- 1 john john 3K Jan 11 09:00 my_file.txt
[ls-lah-cpp]
pre.start = "total"
pre.linecount = 1
# the header is missing, so list the column names
header.tokens = _token_list_, mode, links, user, group, size, date, name
# The body ends when we reach a blank (or whitespace-only) line
body.end = "^\s*$"
# This filters the rows so that only lines that contain .cpp are used
body.member = "\.cpp"
# concatenates the date into a single unbreakable item
body.replace = "(\w{3})\s+(\d{1,2})\s+(\d{2}:\d{2})" ="\1_\2_\3"
# ignore dirs
body.ignore = _regex_ "^drwx.*",
body.tokens = _tokenize_, 0, " "
# Version 9.1.0.2
#
# This file contains an example outputs.conf. Use this file to configure
# forwarding in a distributed set up.
#
# To use one or more of these configurations, copy the configuration block into
# outputs.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Specify a target group for an IP:PORT which consists of a single receiver.
# This is the simplest possible configuration; it sends data to the host at
# 10.1.1.197 on port 9997.
[tcpout:group1]
server=10.1.1.197:9997
# Specify a target group for a hostname which consists of a single receiver.
[tcpout:group2]
server=myhost.Splunk.com:9997
# Specify a target group made up of two receivers. In this case, the data will
# be distributed using AutoLB between these two receivers. You can specify as
# many receivers as you wish here. You can combine host name and IP if you
# wish.
# NOTE: Do not use this configuration with SplunkLightForwarder.
[tcpout:group3]
server=myhost.Splunk.com:9997,10.1.1.197:6666
# You can override any of the global configuration values on a per-target group
# basis. All target groups that do not override a global config will inherit
# the global config.
# Send every event to a receiver at foo.Splunk.com:9997 with a maximum queue
# size of 100,500 events.
[tcpout:group4]
server=foo.Splunk.com:9997
heartbeatFrequency=45
maxQueueSize=100500
# Send data to a receiving system that controls access by tokens.
# NOTE: The token value is encrypted. Encryption is done by the REST endpoint when the configuration is saved.
[tcpout:group4]
server=foo.Splunk.com:9997
token=$1$/fRSBT+2APNAyCB7tlcgOyLnAtqAQFC8NI4TGA2wX4JHfN5d9g==
# Clone events to groups indexer1 and indexer2. Also, index all this data
# locally as well.
[tcpout]
indexAndForward=true
[tcpout:indexer1]
server=Y.Y.Y.Y:9997
[tcpout:indexer2]
server=X.X.X.X:6666
# Clone events between two data balanced groups.
[tcpout:indexer1]
server=A.A.A.A:1111, B.B.B.B:2222
[tcpout:indexer2]
server=C.C.C.C:3333, D.D.D.D:4444
# Syslog output configuration
# This example sends only events generated by the splunk daemon to a remote
# syslog host in syslog-compliant format:
[syslog:syslog-out1]
disabled = false
server = X.X.X.X:9099
type = tcp
priority = <34>
timestampformat = %b %e %H:%M:%S
# Auto Load Balancing
# This example balances output between two indexers listening on
# port 4433: 192.0.2.100:4433 and 192.0.2.101:4433.
# To achieve this you'd create a DNS entry for 'splunkLB' pointing
# to the two IP addresses of your indexers:
#
# $ORIGIN example.com.
# splunkLB A 192.0.2.100
# splunkLB A 192.0.2.101
[tcpout]
defaultGroup = lb
[tcpout:lb]
server = splunkLB.example.com:4433
# Alternatively, you can use autoLB directly without DNS:
[tcpout]
defaultGroup = lb
[tcpout:lb]
server = 192.0.2.100:4433, 192.0.2.101:4433
# Compression
#
# This example sends compressed events to the remote indexer.
# If set to "true", you do not need to set the 'compressed' setting to
"true" in the inputs.conf file on the receiver for compression
of data to occur.
# This setting applies to non-SSL forwarding only. For SSL forwarding with
compression, Splunk software uses the 'useClientSSLCompression' setting.
[tcpout]
server = splunkServer.example.com:4433
compressed = true
# SSL
#
# This example sends events to an indexer via SSL using splunk's
# self signed cert:
[tcpout]
server = splunkServer.example.com:4433
sslPassword = password
clientCert = $SPLUNK_HOME/etc/auth/server.pem
#
# The following example shows how to route events to syslog server
# This is similar to tcpout routing, but DEST_KEY is set to _SYSLOG_ROUTING
#
# 1. Edit $SPLUNK_HOME/etc/system/local/props.conf and set a TRANSFORMS-routing
# attribute:
[default]
TRANSFORMS-routing=errorRouting
[syslog]
TRANSFORMS-routing=syslogRouting
# 2. Edit $SPLUNK_HOME/etc/system/local/transforms.conf and set errorRouting
# and syslogRouting rules:
[errorRouting]
REGEX=error
DEST_KEY=_SYSLOG_ROUTING
FORMAT=errorGroup
[syslogRouting]
REGEX=.
DEST_KEY=_SYSLOG_ROUTING
FORMAT=syslogGroup
# 3. Edit $SPLUNK_HOME/etc/system/local/outputs.conf and set which servers or
# groups the syslog outputs go to:
[syslog]
defaultGroup=everythingElseGroup
[syslog:syslogGroup]
server = 10.1.1.197:9997
[syslog:errorGroup]
server=10.1.1.200:9999
[syslog:everythingElseGroup]
server=10.1.1.250:6666
#
# Perform selective indexing and forwarding
#
# Using a heavy forwarder, you can index and store data locally, and
# forward the data out to a receiving indexer. In the example, by
# setting the defaultGroup to a non-existent group named "noforward",
# the forwarder only forwards data that has been routed using explicit
# target groups defined in the inputs.conf file.
# 1. In outputs.conf:
[tcpout]
defaultGroup = noforward
[indexAndForward]
index=true
selectiveIndexing=true
[tcpout:indexers]
server = 10.1.1.197:9997, 10.1.1.200:9997
# 2. In inputs.conf, add _INDEX_AND_FORWARD_ROUTING to the input
# stanza for any data that you want to index locally, or
# _TCP_ROUTING=<target_group> for data to be forwarded.
[monitor:///var/log/messages/]
_INDEX_AND_FORWARD_ROUTING=local
[monitor:///var/log/httpd/]
_TCP_ROUTING=indexers
# Output to S3 for Ingest Actions
# For example, sending to an AWS bucket "buttercup-bucket", with a prefix
# in front of all paths "some-prefix", along with encryption using AWS
# SSE-S3 to the us-west-2 region:
[rfs:s3]
path = s3://buttercup-bucket/some-prefix
remote.s3.encryption = sse-s3
remote.s3.endpoint = https://s3.us-west-2.amazonaws.com
remote.s3.signature_version = v4
remote.s3.supports_versioning = false
remote.s3.access_key = <access key here>
remote.s3.secret_key = <secret key here>
# Version 9.1.0.2
#
# The following are example passwords.conf configurations. Configure properties for
# your custom application.
#
# There is NO DEFAULT passwords.conf. The file only gets created once you add or
# edit credential information via the storage/passwords endpoint, as follows.
#
# The POST request to add user1 credentials to the storage/passwords endpoint
# curl -k -u admin:changeme https://localhost:8089/servicesNS/nobody/search/storage/passwords -d name=user1 -d password=changeme2
#
# The GET request to list all the credentials stored at the storage/passwords endpoint
# curl -k -u admin:changeme https://localhost:8089/services/storage/passwords
#
# To use one or more of these configurations, copy the configuration block into
# passwords.conf in $SPLUNK_HOME/etc/<apps>/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
[credential::testuser:]
password = changeme
# Version 9.1.0.2
#
# This file contains example process monitor filters. To create your own
# filter, use the information in procmon-filters.conf.spec.
#
# To use one or more of these configurations, copy the configuration block into
# procmon-filters.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[default]
hive = .*
[not-splunk-optimize]
proc = (?<!splunk-optimize.exe)$
type = create|exit|image
# Version 9.1.0.2
#
# The following are example props.conf configurations. Configure properties for
# your data.
#
# To use one or more of these configurations, copy the configuration block into
# props.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
########
# Line merging settings
########
# The following example line-merges source data into multi-line events for
# apache_error sourcetype.
[apache_error]
SHOULD_LINEMERGE = True
########
# Settings for tuning
########
# The following example limits the amount of characters indexed per event from
# host::small_events.
[host::small_events]
TRUNCATE = 256
# The following example turns off DATETIME_CONFIG (which can speed up indexing)
# from any path that ends in /mylogs/*.log.
#
# In addition, the default splunk behavior of finding event boundaries
# via per-event timestamps can't work with NONE, so we disable
# SHOULD_LINEMERGE, essentially declaring that all events in this file are
# single-line.
[source::.../mylogs/*.log]
DATETIME_CONFIG = NONE
SHOULD_LINEMERGE = false
########
# Timestamp extraction configuration
########
# The following example sets Eastern Time Zone if host matches nyc*.
[host::nyc*]
TZ = US/Eastern
# The following example uses a custom datetime.xml that has been created and
# placed in a custom app directory. This sets all events coming in from hosts
# starting with dharma to use this custom file.
[host::dharma*]
DATETIME_CONFIG = <etc/apps/custom_time/datetime.xml>
########
## Timezone alias configuration
########
# The following example uses a custom alias to disambiguate the Australian
# meanings of EST/EDT
TZ_ALIAS = EST=GMT+10:00,EDT=GMT+11:00
# The following example gives a sample case wherein, one timezone field is
# being replaced by/interpreted as another.
TZ_ALIAS = EST=AEST,EDT=AEDT
########
# Transform configuration
########
# The following example creates a search field for host::foo if tied to a
# stanza in transforms.conf.
[host::foo]
TRANSFORMS-foo=foobar
# The following stanza extracts an ip address from _raw
[my_sourcetype]
EXTRACT-extract_ip = (?<ip>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})
# The following example shows how to configure lookup tables
[my_lookuptype]
LOOKUP-foo = mylookuptable userid AS myuserid OUTPUT username AS myusername
# The following shows how to specify field aliases
FIELDALIAS-foo = user AS myuser id AS myid
########
# Sourcetype configuration
########
# The following example sets a sourcetype for the file web_access.log for a
# unix path.
[source::.../web_access.log]
sourcetype = splunk_web_access
# The following example sets a sourcetype for the Windows file iis6.log. Note:
# Backslashes within Windows file paths must be escaped.
[source::...\\iis\\iis6.log]
sourcetype = iis_access
# The following example extracts data from a .Z archive
[preprocess-Z]
invalid_cause = archive
is_valid = False
LEARN_MODEL = false
[source::....Z(.\d+)?]
unarchive_cmd = gzip -cd -
sourcetype = preprocess-Z
NO_BINARY_CHECK = true
# The following example learns a custom sourcetype and limits the range between
# different examples with a smaller than default maxDist.
[custom_sourcetype]
LEARN_MODEL = true
maxDist = 30
# rule:: and delayedrule:: configuration
# The following examples create sourcetype rules for custom sourcetypes with
# regex.
[rule::bar_some]
sourcetype = source_with_lots_of_bars
MORE_THAN_80 = ----
[delayedrule::baz_some]
sourcetype = my_sourcetype
LESS_THAN_70 = ####
########
# File configuration
########
# Binary file configuration
# The following example eats binary files from the sourcetype
# "imported_records".
[imported_records]
NO_BINARY_CHECK = true
# File checksum configuration
# The following example checks the entirety of every file in the web_access
# directory rather than skipping files that appear to be the same.
[source::.../web_access/*]
CHECK_METHOD = entire_md5
########
# Metric configuration
########
# A metric sourcetype of type statsd with 'regex_stanza1', 'regex_stanza2' to
# extract dimensions
[metric_sourcetype_name]
METRICS_PROTOCOL = statsd
STATSD-DIM-TRANSFORMS = regex_stanza1, regex_stanza2
# Convert a single log event into multiple metrics using METRIC-SCHEMA-TRANSFORMS
# and the index-time extraction feature.
[logtometrics]
METRIC-SCHEMA-TRANSFORMS = metric-schema:logtometrics
TRANSFORMS-group = extract_group
TRANSFORMS-name = extract_name
TRANSFORMS-max_size_kb = extract_max_size_kb
TRANSFORMS-current_size_kb = extract_current_size_kb
TRANSFORMS-current_size = extract_current_size
TRANSFORMS-largest_size = extract_largest_size
TRANSFORMS-smallest_size = extract_smallest_size
category = metrics
should_linemerge = false
# Version 9.1.0.2
[pubsub-server:deploymentServer]
disabled=false
targetUri=somehost:8089
[pubsub-server:internalbroker]
disabled=false
targetUri=direct
# Version 9.1.0.2
#
# This file contains example REST endpoint configurations.
#
# To use one or more of these configurations, copy the configuration block into
# restmap.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# The following are default REST configurations. To create your own endpoints,
# modify the values by following the spec outlined in restmap.conf.spec.
# /////////////////////////////////////////////////////////////////////////////
# global settings
# /////////////////////////////////////////////////////////////////////////////
[global]
# indicates if auths are allowed via GET params
allowGetAuth=false
# The default handler (assuming that we have PYTHONPATH set)
pythonHandlerPath=$SPLUNK_HOME/bin/rest_handler.py
# /////////////////////////////////////////////////////////////////////////////
# internal C++ handlers
# NOTE: These are internal Splunk-created endpoints. 3rd party developers can
# only use script or search handlers.
# (Please see restmap.conf.spec for help with configurations.)
# /////////////////////////////////////////////////////////////////////////////
[SBA:sba]
match=/properties
capability=get_property_map
[asyncsearch:asyncsearch]
match=/search
capability=search
[indexing-preview:indexing-preview]
match=/indexing/preview
capability=(edit_monitor or edit_sourcetypes) and (edit_user and edit_tcp)
# Version 9.1.0.2
#
# This file contains example saved searches and alerts.
#
# To use one or more of these configurations, copy the configuration block into
# savedsearches.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# The following searches are example searches. To create your own search,
# modify the values by following the spec outlined in savedsearches.conf.spec.
[Daily indexing volume by server]
search = index=_internal todaysBytesIndexed LicenseManager-Audit NOT source=*web_service.log NOT source=*web_access.log | eval Daily_Indexing_Volume_in_MBs = todaysBytesIndexed/1024/1024 | timechart avg(Daily_Indexing_Volume_in_MBs) by host
dispatch.earliest_time = -7d
[Errors in the last 24 hours]
search = error OR failed OR severe OR ( sourcetype=access_* ( 404 OR 500 OR 503 ) )
dispatch.earliest_time = -1d
[Errors in the last hour]
search = error OR failed OR severe OR ( sourcetype=access_* ( 404 OR 500 OR 503 ) )
dispatch.earliest_time = -1h
[KB indexed per hour last 24 hours]
search = index=_internal metrics group=per_index_thruput NOT debug NOT sourcetype=splunk_web_access | timechart fixedrange=t span=1h sum(kb) | rename sum(kb) as totalKB
dispatch.earliest_time = -1d
[Messages by minute last 3 hours]
search = index=_internal eps "group=per_source_thruput" NOT filetracker | eval events=eps*kb/kbps | timechart fixedrange=t span=1m sum(events) by series
dispatch.earliest_time = -3h
[Splunk errors last 24 hours]
search = index=_internal " error " NOT debug source=*/splunkd.log*
dispatch.earliest_time = -24h
[stats with durable search]
search = index=_internal eps | stats avg(eps) as avg, max(eps) as max, min(eps) as min
dispatch.indexed_earliest = -30m
dispatch.indexed_latest = now
durable.track_time_type = _indextime
durable.lag_time = 60
durable.backfill_type = time_interval
durable.max_backfill_intervals = 100
# Version 9.1.0.2
#
# The following are example stanzas for searchbnf.conf configurations.
#
##################
# selfjoin
##################
[selfjoin-command]
syntax = selfjoin (<selfjoin-options>)* <field-list>
shortdesc = Join results with itself.
description = Join results with itself. Must specify at least one field to join on.
usage = public
example1 = selfjoin id
comment1 = Joins results with itself on 'id' field.
related = join
tags = join combine unite
[selfjoin-options]
syntax = overwrite=<bool> | max=<int> | keepsingle=<int>
description = The selfjoin joins each result with other results that\
have the same value for the join fields. 'overwrite' controls if\
fields from these 'other' results should overwrite fields of the\
result used as the basis for the join (default=true). max indicates\
the maximum number of 'other' results each main result can join with.\
(default = 1, 0 means no limit). 'keepsingle' controls whether or not\
results with a unique value for the join fields (and thus no other\
results to join with) should be retained. (default = false)
# Version 9.1.0.2
#
# The following are examples of segmentation configurations.
#
# To use one or more of these configurations, copy the configuration block into
# segmenters.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Example of a segmenter that doesn't index the date as segments in syslog
# data:
[syslog]
FILTER = ^.*?\d\d:\d\d:\d\d\s+\S+\s+(.*)$
# Example of a segmenter that only indexes the first 256b of events:
[limited-reach]
LOOKAHEAD = 256
# Example of a segmenter that only indexes the first line of an event:
[first-line]
FILTER = ^(.*?)(\n|$)
# Turn segmentation off completely:
[no-segmentation]
LOOKAHEAD = 0
# Version 9.1.0.2
#
# This file contains an example server.conf. Use this file to configure SSL
# and HTTP server options.
#
# To use one or more of these configurations, copy the configuration block
# into server.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Allow users 8 hours before they time out
[general]
sessionTimeout=8h
pass4SymmKey = changeme
# Listen on IPv6 in addition to IPv4...
listenOnIPv6 = yes
# ...but make all outgoing TCP connections on IPv4 exclusively
connectUsingIpVersion = 4-only
# Turn on SSL:
[sslConfig]
enableSplunkdSSL = true
useClientSSLCompression = true
serverCert = $SPLUNK_HOME/etc/auth/server.pem
sslPassword = password
sslRootCAPath = $SPLUNK_HOME/etc/auth/cacert.pem
certCreateScript = genMyServerCert.sh
[proxyConfig]
http_proxy = http://proxy:80
https_proxy = http://proxy:80
proxy_rules = *
no_proxy = localhost, 127.0.0.1, ::1
######## SSO Example ########
# This example trusts all logins from the splunk web server and localhost
# Note that a proxy to the splunk web server should exist to enforce
# authentication
[general]
trustedIP = 127.0.0.1
####### Cascading Replication Example ######
[cascading_replication]
pass4SymmKey = someSecret
max_replication_threads = auto
max_replication_jobs = 5
cascade_replication_plan_reap_interval = 1h
cascade_replication_plan_age = 8h
cascade_replication_plan_fanout = auto
cascade_replication_plan_topology = size_balanced
cascade_replication_plan_select_policy = random
############################################################################
# Set this node to be a cluster manager.
############################################################################
[clustering]
mode = manager
replication_factor = 3
pass4SymmKey = someSecret
search_factor = 2
############################################################################
# Set this node to be a peer to cluster manager "SplunkManager01" on port
# 8089.
############################################################################
[clustering]
mode = peer
manager_uri = https://SplunkManager01.example.com:8089
pass4SymmKey = someSecret
############################################################################
# Set this node to be a searchhead to cluster manager "SplunkManager01" on
# port 8089.
############################################################################
[clustering]
mode = searchhead
manager_uri = https://SplunkManager01.example.com:8089
pass4SymmKey = someSecret
############################################################################
# Set this node to be a searchhead to multiple cluster managers -
# "SplunkManager01" with pass4SymmKey set to 'someSecret and "SplunkManager02"
# with no pass4SymmKey set here.
############################################################################
[clustering]
mode = searchhead
manager_uri = clustermanager:east, clustermanager:west
[clustermanager:east]
manager_uri = https://SplunkManager01.example.com:8089
pass4SymmKey=someSecret
[clustermanager:west]
manager_uri = https://SplunkManager02.example.com:8089
############################################################################
# Configuration file change tracker
# To enable the feature, set 'disabled=false'.
# Set 'mode=auto' to include all available features.
###############################################################################
[config_change_tracker]
disabled = false
mode = auto
denylist=peer-apps|savedsearches\.conf$
exclude_fields = server.conf:general:pass4SymmKey, authentication.conf:authentication:*
############################################################################
# Open an additional non-SSL HTTP REST port, bound to the localhost
# interface (and therefore not accessible from outside the machine) Local
# REST clients like the CLI can use this to avoid SSL overhead when not
# sending data across the network.
############################################################################
[httpServerListener:127.0.0.1:8090]
ssl = false
# Version 9.1.0.2
#
# Example 1
# Matches all clients and includes all apps in the server class
[global]
whitelist.0=*
# whitelist matches all clients.
[serverClass:AllApps]
[serverClass:AllApps:app:*]
# a server class that encapsulates all apps in the repositoryLocation
# Example 2
# Assign server classes based on dns names.
[global]
[serverClass:AppsForOps]
whitelist.0=*.ops.yourcompany.com
[serverClass:AppsForOps:app:unix]
[serverClass:AppsForOps:app:SplunkLightForwarder]
[serverClass:AppsForDesktops]
filterType=blacklist
# exclude everybody except the Windows desktop machines.
blacklist.0=*
whitelist.0=*.desktops.yourcompany.com
[serverClass:AppsForDesktops:app:SplunkDesktop]
# Example 3
# Deploy server class based on machine types
[global]
[serverClass:AppsByMachineType]
# Ensure this server class is matched by all clients. It is IMPORTANT to
# have a general filter here, and a more specific filter at the app level.
# An app is matched _only_ if the server class it is contained in was
# successfully matched!
whitelist.0=*
[serverClass:AppsByMachineType:app:SplunkDesktop]
# Deploy this app only to Windows boxes.
machineTypesFilter=windows-*
[serverClass:AppsByMachineType:app:unix]
# Deploy this app only to unix boxes - 32/64 bit.
machineTypesFilter=linux-i686, linux-x86_64
# Example 4
# Specify app update exclusion list.
[global]
# The local/ subdirectory within every app will not be touched upon update.
excludeFromUpdate=$app_root$/local
[serverClass:MyApps]
[serverClass:MyApps:app:SpecialCaseApp]
# For the SpecialCaseApp, both the local/ and lookups/ subdirectories will
# not be touched upon update.
excludeFromUpdate=$app_root$/local,$app_root$/lookups
# Example 5
# Control client reloads/restarts
[global]
restartSplunkd=false
restartSplunkWeb=true
# For this serverclass, we attempt to only reload the configuration files
# within the app. If we fail to reload (i.e., if there's a conf in the app that
# requires a restart), the admin must restart the instance themselves.
[serverClass:ReloadOnly]
issueReload=true
# This is an example of a best-effort reloadable serverClass, i.e., we try to
# reload the app, but if there are files that require a restart, only then
# do we restart.
[serverClass:tryReloadThenRestart]
issueReload=true
restartIfNeeded=true
# Example 6a
# Use (allow list|deny list) text file import.
[serverClass:MyApps]
whitelist.from_pathname = etc/system/local/clients.txt
# Example 6b
# Use (allow list|deny list) CSV file import to read all values from the Client
# field (ignoring all other fields).
[serverClass:MyApps]
whitelist.select_field = Client
whitelist.from_pathname = etc/system/local/clients.csv
# Example 6c
# Use (allow list|deny list) CSV file import to read some values from the Client
# field (ignoring all other fields) where ServerType is one of T1, T2, or
# starts with dc.
[serverClass:MyApps]
whitelist.select_field = Client
whitelist.from_pathname = etc/system/local/server_list.csv
whitelist.where_field = ServerType
whitelist.where_equals = T1, T2, dc*
# Example 6d
# Use (allow list|deny list) CSV file import to read some values from field 2
# (ignoring all other fields) where field 1 is one of T1, T2, or starts with
# dc.
[serverClass:MyApps]
whitelist.select_field = 2
whitelist.from_pathname = etc/system/local/server_list.csv
whitelist.where_field = 1
whitelist.where_equals = T1, T2, dc*
<?xml version="1.0" encoding="UTF-8"?>
<deployment name="root">
<serverClass name="spacecake_apps">
<app name="app_0">
<repositoryLocation>$SPLUNK_HOME/etc/myapps</repositoryLocation>
<!-- Download app_0 from the given location -->
<endpoint>splunk.com/spacecake/apps/app_0.tgz</endpoint>
</app>
<app name="app_1">
<repositoryLocation>$SPLUNK_HOME/etc/myapps</repositoryLocation>
<!-- Download app_1 from the given location -->
<endpoint>splunk.com/spacecake/apps/app_1.tgz</endpoint>
</app>
</serverClass>
<serverClass name="foobar_apps">
<!-- construct url for each location based on the scheme below and download each app -->
<endpoint>foobar.com:5556/services/streams/deployment?name=$serverClassName$_$appName$.bundle</endpoint>
<app name="app_0"/>
<app name="app_1"/>
<app name="app_2"/>
</serverClass>
<serverClass name="local_apps">
<endpoint>foo</endpoint>
<app name="app_0">
<!-- app present in local filesystem -->
<endpoint>file:/home/johndoe/splunk/ds/service_class_2_app_0.bundle</endpoint>
</app>
<app name="app_1">
<!-- app present in local filesystem -->
<endpoint>file:/home/johndoe/splunk/ds/service_class_2_app_1.bundle</endpoint>
</app>
<app name="app_2">
<!-- app present in local filesystem -->
<endpoint>file:/home/johndoe/splunk/ds/service_class_2_app_2.bundle</endpoint>
</app>
</serverClass>
</deployment>
# Version 9.1.0.2
#
# This file contains an example source-classifier.conf. Use this file to
# configure classification
# of sources into sourcetypes.
#
# To use one or more of these configurations, copy the configuration block
# into source-classifier.conf in $SPLUNK_HOME/etc/system/local/. You must
# restart Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# terms to ignore when generating sourcetype model to prevent model from
# containing servernames
ignored_model_keywords = sun mon tue tues wed thurs fri sat sunday monday tuesday wednesday thursday friday saturday jan feb mar apr may jun jul aug sep oct nov dec january february march april may june july august september october november december 2003 2004 2005 2006 2007 2008 2009 am pm ut utc gmt cet cest cetdst met mest metdst mez mesz eet eest eetdst wet west wetdst msk msd ist jst kst hkt ast adt est edt cst cdt mst mdt pst pdt cast cadt east eadt wast wadt
# terms to ignore when comparing a sourcename against a known sourcename
ignored_filename_keywords = log logs com common event events little main message messages queue server splunk
# Version 9.1.0.2
#
# This file contains an example sourcetypes.conf. Use this file to configure
# sourcetype models.
#
# NOTE: sourcetypes.conf is a machine-generated file that stores the document
# models used by the file classifier for creating source types.
#
# Generally, you should not edit sourcetypes.conf, as most attributes are
# machine generated. However, there are two attributes which you can change.
#
# To use one or more of these configurations, copy the configuration block into
# sourcetypes.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# This is an example of a machine-generated sourcetype models for a fictitious
# sourcetype cadcamlog.
#
[/Users/bob/logs/bnf.x5_Thu_Dec_13_15:59:06_2007_171714722]
_source = /Users/bob/logs/bnf.x5
_sourcetype = cadcamlog
L----------- = 0.096899
L-t<_EQ> = 0.016473
# Version 9.1.0.2
#
# This is an example tags.conf. Use this file to define tags for fields.
#
# To use one or more of these configurations, copy the configuration block into
# tags.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# This first example presents a situation where the field is "host" and the
# three hostnames for which tags are being defined are "hostswitch,"
# "emailbox," and "devmachine." Each hostname has two tags applied to it, one
# per line. Note also that the "building1" tag has been applied to two hostname
# values (emailbox and devmachine).
[host=hostswitch]
pci = enabled
cardholder-dest = enabled
[host=emailbox]
email = enabled
building1 = enabled
[host=devmachine]
development = enabled
building1 = enabled
[src_ip=192.168.1.1]
firewall = enabled
[seekPtr=1cb58000]
EOF = enabled
NOT_EOF = disabled
# Version 9.1.0.2
#
# This is an example times.conf. Use this file to create custom time ranges
# that can be used while interacting with the search system.
#
# To use one or more of these configurations, copy the configuration block
# into times.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Note: These are examples. Replace the values with your own customizations.
# The stanza name is an alphanumeric string (no spaces) that uniquely
# identifies a time range.
[this_business_week]
# Define the label used in the time range control
label = This business week
# Define the label to be used in display headers. If omitted the 'label' key
# will be used with the first letter lowercased.
header_label = during this business week
earliest_time = +1d@w1
latest_time = +6d@w6
# Define the ordering sequence of this time range. All time ranges are
# sorted numerically, ascending. If the time range is in a sub menu and not
# in the main menu, this will determine the position within the sub menu.
order = 110
# a time range that only has a bound on the earliest time
#
[last_3_hours]
label = Last 3 hours
header_label = in the last 3 hours
earliest_time = -3h
order = 30
# Use epoch time notation to define the time bounds for the Fall Semester
# 2013, where earliest_time is 9/4/13 00:00:00 and latest_time is 12/13/13
# 00:00:00.
#
[Fall_2013]
label = Fall Semester 2013
earliest_time = 1378278000
latest_time = 1386921600
#
# Disable the realtime panel in the time range picker
[settings]
show_realtime = false
# Version 9.1.0.2
#
# This is an example transactiontypes.conf. Use this file as a template to
# configure transactions types.
#
# To use one or more of these configurations, copy the configuration block into
# transactiontypes.conf in $SPLUNK_HOME/etc/system/local/.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[default]
maxspan = 5m
maxpause = 2s
match = closest
[purchase]
maxspan = 10m
maxpause = 5m
fields = userid
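# For illustration only (not part of the shipped example, and "access_*" is a
# hypothetical sourcetype): a transaction type defined here can be referenced
# by name from the transaction command, for example
#
#   sourcetype=access_* | transaction name=purchase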
# Version 9.1.0.2
#
# This is an example transforms.conf. Use this file to create regexes and
# rules for transforms. Use this file in tandem with props.conf.
#
# To use one or more of these configurations, copy the configuration block
# into transforms.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Note: These are examples. Replace the values with your own customizations.
# Indexed field:
[netscreen-error]
REGEX = device_id=\[w+\](?<err_code>[^:]+)
FORMAT = err_code::$1
WRITE_META = true
# Override host:
[hostoverride]
DEST_KEY = MetaData:Host
REGEX = \s(\w*)$
FORMAT = host::$1
# Extracted fields:
[netscreen-error-field]
REGEX = device_id=\[w+\](?<err_code>[^:]+)
FORMAT = err_code::$1
# Index-time evaluations:
[discard-long-lines]
INGEST_EVAL = queue=if(length(_raw) > 500, "nullQueue", "indexQueue")
[split-into-sixteen-indexes-for-no-good-reason]
INGEST_EVAL = index="split_" . substr(md5(_raw),1,1)
[add-two-numeric-fields]
INGEST_EVAL = loglen_raw=ln(length(_raw)), loglen_src=ln(length(source))
# In this example the Splunk platform only creates the new index-time field if
# the hostname has a dot in it; assigning null() to a new field is a no-op:
[add-hostdomain-field]
INGEST_EVAL = hostdomain=if(host LIKE "%.%", replace(host,"^[^\\.]+\\.",""), null())
# Static lookup table
[mylookuptable]
filename = mytable.csv
# One-to-one lookup guarantees that the Splunk platform outputs a single
# lookup value for each input value. When no match exists, the Splunk platform
# uses the value for "default_match", which by default is nothing.
[mylook]
filename = mytable.csv
max_matches = 1
min_matches = 1
default_match =
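# For illustration only (field names borrowed from the props.conf example
# earlier in this document): a file-based lookup defined here can also be
# invoked directly at search time, for example
#
#   ... | lookup mylookuptable userid AS myuserid OUTPUT username AS myusername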
# Lookup and filter results:
[myfilteredlookup]
filename = mytable.csv
filter = id<500 AND color="red"
# external command lookup table:
[myexternaltable]
external_cmd = testadapter.py blah
fields_list = foo bar
# Temporal based static lookup table:
[staticwtime]
filename = mytable.csv
time_field = timestamp
time_format = %d/%m/%y %H:%M:%S
# Mask sensitive data:
[session-anonymizer]
REGEX = (?m)^(.*)SessionId=\w+(\w{4}[&"].*)$
FORMAT = $1SessionId=########$2
DEST_KEY = _raw
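# For illustration only (a hypothetical event): with the stanza above, a raw
# event such as
#   user=bob&SessionId=ab3c9f2e7d41&action=buy
# is rewritten to
#   user=bob&SessionId=########7d41&action=buy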
# Route to an alternate index:
[AppRedirect]
REGEX = (Application)
DEST_KEY = _MetaData:Index
FORMAT = Verbose
# Extract comma-delimited values into fields:
# This example assigns extracted values that do not have field names
# from _raw to field1, field2 and field3, in the order that the
# fields are extracted.
# If the Splunk platform extracts more than three values that do not
# have field names, then the Splunk platform ignores those values.
[extract_csv]
DELIMS = ","
FIELDS = "field1", "field2", "field3"
# This example extracts key-value pairs which are separated by '|'
# while the key is delimited from value by '='
[pipe_eq]
DELIMS = "|", "="
# This example extracts key-value pairs which are separated by '|' or
# ';', while the key is delimited from value by '=' or ':'
[multiple_delims]
DELIMS = "|;", "=:"
###### BASIC MODULAR REGULAR EXPRESSIONS DEFINITION START ###########
# When you add a new basic modular regex you must add a comment that
# lists the fields that it extracts as named capturing groups.
# If there are no field names, note the placeholders
# for the group name as: Extracts: field1, field2....
[all_lazy]
REGEX = .*?
[all]
REGEX = .*
[nspaces]
# Matches one or more NON space characters:
REGEX = \S+
[alphas]
# Matches a string containing only letters a-zA-Z:
REGEX = [a-zA-Z]+
[alnums]
# Matches a string containing letters + digits:
REGEX = [a-zA-Z0-9]+
[qstring]
# Matches a quoted "string" and extracts an unnamed variable
# Name MUST be provided as: [[qstring:name]]
# Extracts: empty-name-group (needs name)
REGEX = "(?<>[^"]*+)"
[sbstring]
# Matches a string enclosed in [] and extracts an unnamed variable
# Name must be provided as: [[sbstring:name]]
# Extracts: empty-name-group (needs name)
REGEX = \[(?<>[^\]]*+)\]
[digits]
REGEX = \d+
[int]
# Matches an integer or a hex number:
REGEX = 0x[a-fA-F0-9]+|\d+
[float]
# Matches a float (or an int):
REGEX = \d*\.\d+|[[int]]
[octet]
# Matches only numbers from 0-255 (one octet in an ip):
REGEX = (?:2(?:5[0-5]|[0-4][0-9])|[0-1][0-9][0-9]|[0-9][0-9]?)
[ipv4]
# Matches a valid IPv4 optionally followed by :port_num. The octets in the IP
# are also validated to be in the 0-255 range.
# Extracts: ip, port
REGEX = (?<ip>[[octet]](?:\.[[octet]]){3})(?::[[int:port]])?
[simple_url]
# Matches a url of the form proto://domain.tld/uri
# Extracts: url, domain
REGEX = (?<url>\w++://(?<domain>[a-zA-Z0-9\-.:]++)(?:/[^\s"]*)?)
[url]
# Matches a url in the form of: proto://domain.tld/uri
# Extracts: url, proto, domain, uri
REGEX = (?<url>[[alphas:proto]]://(?<domain>[a-zA-Z0-9\-.:]++)(?<uri>/[^\s"]*)?)
[simple_uri]
# Matches a uri in the form of: /path/to/resource?query
# Extracts: uri, uri_path, uri_query
REGEX = (?<uri>(?<uri_path>[^\s\?"]++)(?:\\?(?<uri_query>[^\s"]+))?)
[uri]
# uri = path optionally followed by query [/this/path/file.js?query=part&other=var]
# path = root part followed by file [/root/part/file.part]
# Extracts: uri, uri_path, uri_root, uri_file, uri_query, uri_domain (optional if in proxy mode)
REGEX = (?<uri>(?:\w++://(?<uri_domain>[^/\s]++))?(?<uri_path>(?<uri_root>/+(?:[^\s\?;=/]*+/+)*)(?<uri_file>[^\s\?;=?/]*+))(?:\?(?<uri_query>[^\s"]+))?)
[hide-ip-address]
# When you make a clone of an event with the sourcetype masked_ip_address, the clone's
# text is changed to mask the IP address.
# The cloned event is further processed by index-time transforms and
# SEDCMD expressions according to its new sourcetype.
# In most scenarios an additional transform directs the
# masked_ip_address event to a different index than the original data.
REGEX = ^(.*?)src=\d+\.\d+\.\d+\.\d+(.*)$
FORMAT = $1src=XXXXX$2
DEST_KEY = _raw
CLONE_SOURCETYPE = masked_ip_addresses
# Set REPEAT_MATCH to true to repeatedly match the regex in the data.
# When REPEAT_MATCH is set to true, every match of the regex is added as an
# indexed field: a, b, c, d, e, etc. For example: 1483382050 a=1 b=2 c=3 d=4 e=5
# If REPEAT_MATCH is not set, the match stops at a=1.
[repeat_regex]
REGEX = ([a-z])=(\d+)
FORMAT = $1::$2
REPEAT_MATCH = true
WRITE_META = true
###### BASIC MODULAR REGULAR EXPRESSIONS DEFINITION END ###########
# Statsd dimensions extraction:
# In most cases the Splunk platform needs only one regex to run per
# sourcetype. By default the Splunk platform would look for the sourcetype
# name in transforms.conf. In that case, there is no need to provide
# the STATSD-DIM-TRANSFORMS setting in props.conf.
# For example, these two stanzas would extract dimensions as ipv4=10.2.3.4
# and os=windows from statsd data=mem.percent.used.10.2.3.4.windows:33|g
[statsd-dims:regex_stanza1]
REGEX = (?<ipv4>\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})
REMOVE_DIMS_FROM_METRIC_NAME = true
[statsd-dims:regex_stanza2]
REGEX = \S+\.(?<os>\w+):
REMOVE_DIMS_FROM_METRIC_NAME = true
[statsd-dims:metric_sourcetype_name]
# In this example, we extract both ipv4 and os dimension using a single regex:
REGEX = (?<ipv4>\d{1,3}.\d{1,3}.\d{1,3}.\d{1,3})\.(?<os>\w+):
REMOVE_DIMS_FROM_METRIC_NAME = true
# In this metrics example, we start with this log line:
#
# 01-26-2018 07:49:49.030 -0800 INFO Metrics - group=queue, name=aggqueue, max_size_kb=1024, current_size_kb=1,
# current_size=3, largest_size=49, smallest_size=0, dc_latitude=37.3187706, dc_longitude=-121.9515042
#
# The following stanza converts that single event into multiple metrics at
# index-time. It deny lists the "dc_latitude" and "dc_longitude" dimensions,
# which means they are omitted from the generated metric data points. It also
# allow lists the "name" and "dc_latitude" dimensions, which means that those
# dimensions potentially are the only dimensions that appear in the
# generated metric data points.
# When a log-to-metrics configuration simultaneously includes allow list and
# deny list dimensions, the Splunk platform includes the dimensions that
# appear in the allow list and also do not appear in the deny list
# for the generated metric data points. For example, "dc_latitude" appears in
# the allow list, but also in the deny list, so it is not included in the generated
# metric data points. The metric data points generated by this configuration
# have "name" as their sole dimension.
[metric-schema:logtometrics]
METRIC-SCHEMA-MEASURES-queue = max_size_kb,current_size_kb,current_size,largest_size,smallest_size
METRIC-SCHEMA-BLACKLIST-DIMS-queue = dc_latitude,dc_longitude
METRIC-SCHEMA-WHITELIST-DIMS-queue = name,dc_latitude
# Here are the metrics generated by that stanza:
# {'metric_name' : 'queue.max_size_kb', '_value' : 1024, 'name': 'aggqueue'},
# {'metric_name' : 'queue.current_size_kb', '_value' : 1, 'name': 'aggqueue'},
# {'metric_name' : 'queue.current_size', '_value' : 3, 'name': 'aggqueue'},
# {'metric_name' : 'queue.largest_size', '_value' : 49, 'name': 'aggqueue'},
# {'metric_name' : 'queue.smallest_size', '_value' : 0, 'name': 'aggqueue'}
# You can use wildcard characters ('*') in METRIC-SCHEMA configurations. In
# the preceding example, '*_size' matches 'current_size', 'largest_size', and
# 'smallest_size'. The following configuration uses a wildcard to include all
# three of those fields without individually listing each one.
# METRIC-SCHEMA-MEASURES-queue = max_size_kb,current_size_kb,*_size
# In the sample log above, group=queue represents the unique metric name prefix. Hence, it needs to be
# formatted and saved as metric_name::queue for Splunk to identify queue as a metric name prefix.
[extract_group]
REGEX = group=(\w+)
FORMAT = metric_name::$1
WRITE_META = true
[extract_name]
REGEX = name=(\w+)
FORMAT = name::$1
WRITE_META = true
[extract_max_size_kb]
REGEX = max_size_kb=(\w+)
FORMAT = max_size_kb::$1
WRITE_META = true
[extract_current_size_kb]
REGEX = current_size_kb=(\w+)
FORMAT = current_size_kb::$1
WRITE_META = true
[extract_current_size]
REGEX = current_size=(\w+)
FORMAT = current_size::$1
WRITE_META = true
[extract_largest_size]
REGEX = largest_size=(\w+)
FORMAT = largest_size::$1
WRITE_META = true
[extract_smallest_size]
REGEX = smallest_size=(\w+)
FORMAT = smallest_size::$1
WRITE_META = true
# Version 9.1.0.2
#
# This file contains example of ui preferences for a view.
#
# To use one or more of these configurations, copy the configuration block into
# ui-prefs.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to
# enable configurations.
#
# To learn more about configuration files (including precedence) please see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# The following ui preferences will default the time range picker on the search page
# from All time to Today. We will store this ui-prefs.conf in
# $SPLUNK_HOME/etc/apps/search/local/ to only update the search view of the Search app.
[search]
dispatch.earliest_time = @d
dispatch.latest_time = now
# Version 9.1.0.2
#
# This file contains the tours available for Splunk Onboarding
#
# To update tours, copy the configuration block into
# ui-tour.conf in $SPLUNK_HOME/etc/system/local/. Restart the Splunk software to
# see the changes.
#
# To learn more about configuration files (including precedence) see the
# documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
#
# Image Tour
[tour-name]
type = image
imageName1 = TourStep1.png
imageCaption1 = This is the first caption
imageName2 = TourStep2.png
imageCaption2 = This is the second caption
imgPath = /testtour
context = system
doneText = Continue to Tour Page
doneURL = app/toursapp/home
# Interactive Tour
[test-interactive-tour]
type = interactive
tourPage = reports
urlData = data=foo&moredata=bar
label = Interactive Tour Test
stepText1 = Welcome to this test tour
stepText2 = This is the first step in the tour
stepElement2 = .test-selector
stepText3 = This is the second step in the tour
stepElement3 = .test-selector
stepClickEvent3 = mousedown
stepClickElement3 = .test-click-element
forceTour = 1
# Version 9.1.0.2
#
# This is an example user-prefs.conf. Use this file to configure settings
# on a per-user basis for use by the Splunk Web UI.
#
# To use one or more of these configurations, copy the configuration block
# into user-prefs.conf in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# Note: These are examples. Replace the values with your own
# customizations.
# EXAMPLE: Setting the default timezone to GMT for all Power and User role
# members, and setting a different language preference for each.
[role_power]
tz = GMT
lang = en-US
[role_user]
tz = GMT
lang = fr-FR,fr-CA;q=0
# Version 9.1.0.2
#
# This is an example user-seed.conf. Use this file to create an initial login.
#
# NOTE: When starting Splunk for the first time, the hash of the password is
# stored in $SPLUNK_HOME/etc/system/local/user-seed.conf and the password file
# is seeded with this hash. This file can also be used to set the default
# username and password, if $SPLUNK_HOME/etc/passwd is not present. If the
# $SPLUNK_HOME/etc/passwd file is present, the settings in this file
# (user-seed.conf) are not used.
#
# To use this configuration, copy the configuration block into user-seed.conf
# in $SPLUNK_HOME/etc/system/local/. You must restart Splunk to enable configurations.
#
# To learn more about configuration files (including precedence) please see the documentation
# located at http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[user_info]
USERNAME = admin
HASHED_PASSWORD = $<REMOVED>
# Version 9.1.0.2
#
# This is an example viewstates.conf.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
[charting:g3b5fa7l]
ChartTypeFormatter_0_7_0.default = area
Count_0_6_0.count = 10
LegendFormatter_0_13_0.default = right
LineMarkerFormatter_0_10_0.default = false
NullValueFormatter_0_12_0.default = gaps
[*:g3jck9ey]
Count_0_7_1.count = 20
DataOverlay_0_12_0.dataOverlayMode = none
DataOverlay_1_13_0.dataOverlayMode = none
FieldPicker_0_6_1.fields = host sourcetype source date_hour date_mday date_minute date_month
FieldPicker_0_6_1.sidebarDisplay = True
FlashTimeline_0_5_0.annotationSearch = search index=twink
FlashTimeline_0_5_0.enableAnnotations = true
FlashTimeline_0_5_0.minimized = false
MaxLines_0_13_0.maxLines = 10
RowNumbers_0_12_0.displayRowNumbers = true
RowNumbers_1_11_0.displayRowNumbers = true
RowNumbers_2_12_0.displayRowNumbers = true
Segmentation_0_14_0.segmentation = full
SoftWrap_0_11_0.enable = true
[dashboard:_current]
TimeRangePicker_0_1_0.selected = All time
# Version 9.1.0.2
#
# You can configure Splunk Web features for your custom application.
#
# To use one or more of these configurations, copy the configuration block into
# the web-features.conf file located in $SPLUNK_HOME/etc/system/local/. You must restart
# Splunk software after you make changes to this setting to enable configurations.
#
# For more information on configuration files, including precedence, search for
# "Use Splunk Web to manage configuration files" in the Admin Manual in the Splunk Docs.
[feature:search_v2_endpoint]
enable_search_v2_endpoint = false
[feature:quarantine_files]
enable_jQuery2 = false
enable_unsupported_hotlinked_imports = false
[feature:dashboards_csp]
enable_dashboards_external_content_restriction = true
enable_dashboards_redirection_restriction = true
dashboards_trusted_domain.splunk = *.splunk.com
dashboards_trusted_domain.example = www.example.com
[feature:page_migration]
enable_triggered_alerts_vnext = false
enable_home_vnext = false
[feature:dashboard_studio]
enable_inputs_on_canvas = true
enable_show_hide = true
[feature:dashboard_inputs_localization]
enable_dashboard_inputs_localization = false
[feature:share_job]
enable_share_job_control = true
[feature:search_auto_format]
enable_autoformatted_comments = false
[feature:ui_prefs_optimizations]
optimize_ui_prefs_performance = true
# Version 9.1.0.2
#
# This is an example web.conf. Use this file to configure data web
# settings.
#
# To use one or more of these configurations, copy the configuration block
# into web.conf in $SPLUNK_HOME/etc/system/local/. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# This stanza heading must precede any changes.
[settings]
# Change the default port number:
httpport = 12800
# Also run the python application server on a non-default port:
appServerPorts = 12801
# Turn on SSL:
enableSplunkWebSSL = true
# absolute paths may be used here.
privKeyPath = /home/user/certs/myprivatekey.pem
serverCert = /home/user/certs/mycacert.pem
# NOTE: non-absolute paths are relative to $SPLUNK_HOME
# First party apps:
splunk_dashboard_app_name = splunk-dashboard-app
# Allowing embeddable content in dashboards
# Embed tags will appear as is in the dashboard source
dashboard_html_allow_embeddable_content = true
dashboard_html_wrap_embed = false
# Allowing remote images from trusted hosts in simple XML dashboards
pdfgen_trusted_hosts = *.splunk.com, 192.0.2.0/24
# Version 9.1.0.2
#
# This is an example wmi.conf. These settings are used to control inputs
# from WMI providers. Refer to wmi.conf.spec and the documentation at
# splunk.com for more information about this file.
#
# To use one or more of these configurations, copy the configuration block
# into wmi.conf in $SPLUNK_HOME\etc\system\local\. You must restart Splunk
# to enable configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# This stanza specifies runtime parameters.
[settings]
initial_backoff = 5
max_backoff = 20
max_retries_at_max_backoff = 2
checkpoint_sync_interval = 2
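# The settings above control reconnection to a failing WMI provider: wait
# initial_backoff seconds after the first error, roughly double the wait on
# each subsequent failure until max_backoff is reached, then retry
# max_retries_at_max_backoff more times before giving up. The doubling is an
# assumption based on wmi.conf.spec; this small Python sketch just prints the
# resulting wait schedule in seconds.
def wmi_retry_schedule(initial=5, maximum=20, retries_at_max=2):
    wait = initial
    while wait < maximum:
        yield wait
        wait = min(wait * 2, maximum)   # assumed doubling up to max_backoff
    for _ in range(retries_at_max):
        yield maximum                   # after these, the input gives up

print(list(wmi_retry_schedule()))       # -> [5, 10, 20, 20]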
# Pull events from the Application, System, and Security event logs on the
# local system every 10 seconds. Store the events in the "wmi_eventlog"
# Splunk index.
[WMI:LocalApplication]
interval = 10
event_log_file = Application
disabled = 0
index = wmi_eventlog
[WMI:LocalSystem]
interval = 10
event_log_file = System
disabled = 0
index = wmi_eventlog
[WMI:LocalSecurity]
interval = 10
event_log_file = Security
disabled = 0
index = wmi_eventlog
# Gather disk and memory performance metrics from the local system (disk
# every second, memory every 10 seconds). Store the events in the
# "wmi_perfmon" Splunk index.
[WMI:LocalPhysicalDisk]
interval = 1
wql = select Name, DiskBytesPerSec, PercentDiskReadTime, PercentDiskWriteTime, PercentDiskTime from Win32_PerfFormattedData_PerfDisk_PhysicalDisk
disabled = 0
index = wmi_perfmon
[WMI:LocalMainMemory]
interval = 10
wql = select CommittedBytes, AvailableBytes, PercentCommittedBytesInUse, Caption from Win32_PerfFormattedData_PerfOS_Memory
disabled = 0
index = wmi_perfmon
# Collect all process-related performance metrics for the splunkd process,
# every second. Store those events in the "wmi_perfmon" index.
[WMI:LocalSplunkdProcess]
interval = 1
wql = select * from Win32_PerfFormattedData_PerfProc_Process where Name = "splunkd"
disabled = 0
index = wmi_perfmon
# Listen to three event log channels every 10 seconds, capturing only log
# events that occur while Splunk is running. Gather data from three remote
# servers: srv1, srv2, and srv3.
[WMI:TailApplicationLogs]
interval = 10
event_log_file = Application, Security, System
server = srv1, srv2, srv3
disabled = 0
current_only = 1
batch_size = 10
# Listen for process-creation events on a remote machine, once a second.
[WMI:ProcessCreation]
interval = 1
server = remote-machine
wql = select * from __InstanceCreationEvent within 1 where TargetInstance isa 'Win32_Process'
disabled = 0
current_only = 1
batch_size = 10
# Receive events whenever someone connects or removes a USB device on
# the computer, once a second.
[WMI:USBChanges]
interval = 1
wql = select * from __InstanceOperationEvent within 1 where TargetInstance ISA 'Win32_PnPEntity' and TargetInstance.Description='USB Mass Storage Device'
disabled = 0
current_only = 1
batch_size = 10
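# Before committing a WQL query like the ones above to wmi.conf, it can help
# to run it interactively on the Windows host. This sketch uses the
# third-party Python "wmi" package (pip install wmi), which is not part of
# Splunk; the query shown is the PhysicalDisk example from this file.
import wmi

c = wmi.WMI()   # local machine; wmi.WMI(computer="srv1") targets a remote host
wql = ("select Name, PercentDiskTime "
       "from Win32_PerfFormattedData_PerfDisk_PhysicalDisk")
for row in c.query(wql):
    print(row.Name, row.PercentDiskTime)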
# Version 9.1.0.2
#
# This is an example workflow_actions.conf. These settings are used to
# create workflow actions accessible in an event viewer. Refer to
# workflow_actions.conf.spec and the documentation at splunk.com for more
# information about this file.
#
# To use one or more of these configurations, copy the configuration block
# into workflow_actions.conf in $SPLUNK_HOME/etc/system/local/, or into your
# application's local/ folder. You must restart Splunk to enable
# configurations.
#
# To learn more about configuration files (including precedence) please see
# the documentation located at
# http://docs.splunk.com/Documentation/Splunk/latest/Admin/Aboutconfigurationfiles
# These are the default workflow actions and make extensive use of the
# special parameters: $@namespace$, $@sid$, etc.
[show_source]
type=link
fields = _cd, source, host, index
display_location = event_menu
label = Show Source
link.uri = /app/$@namespace$/show_source?sid=$@sid$&offset=$@offset$&latest_time=$@latest_time$
[ifx]
type = link
display_location = event_menu
label = Extract Fields
link.uri = /ifx?sid=$@sid$&offset=$@offset$&namespace=$@namespace$
[etb]
type = link
display_location = event_menu
label = Build Eventtype
link.uri = /etb?sid=$@sid$&offset=$@offset$&namespace=$@namespace$
# This is an example workflow action which will be displayed in a specific
# field menu (clientip).
[whois]
display_location = field_menu
fields = clientip
label = Whois: $clientip$
link.method = get
link.target = blank
link.uri = http://ws.arin.net/whois/?queryinput=$clientip$
type = link
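# A minimal sketch of the substitution behind the [whois] action above: when
# a user clicks the field action, Splunk replaces $clientip$ in link.uri with
# the URL-encoded field value and issues a GET. The sample IP below is a
# made-up illustration value.
from urllib.parse import quote_plus

clientip = "203.0.113.45"
uri_template = "http://ws.arin.net/whois/?queryinput=$clientip$"
print(uri_template.replace("$clientip$", quote_plus(clientip)))
# -> http://ws.arin.net/whois/?queryinput=203.0.113.45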
# This is an example field action which will allow a user to search every
# field value in Google.
[Google]
display_location = field_menu
fields = *
label = Google $@field_name$
link.method = get
link.uri = http://www.google.com/search?q=$@field_value$
type = link
# This is an example post link that will send its field name and field value
# to a fictional bug tracking system.
[Create JIRA issue]
display_location = field_menu
fields = error_msg
label = Create JIRA issue for $error_class$
link.method = post
link.postargs.1.key = error
link.postargs.1.value = $error_msg$
link.target = blank
link.uri = http://127.0.0.1:8000/jira/issue/create
type = link
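# The [Create JIRA issue] action above performs a form POST with one field,
# error=$error_msg$, to the (fictional) tracker URL. Roughly the same request
# expressed with the Python standard library, using a made-up error message
# in place of the $error_msg$ token:
from urllib.parse import urlencode
from urllib.request import Request, urlopen

data = urlencode({"error": "NullPointerException in checkout handler"}).encode()
req = Request("http://127.0.0.1:8000/jira/issue/create", data=data, method="POST")
# urlopen(req)   # uncomment to send; the endpoint in this example is fictional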
# This is an example search workflow action that will be displayed in an
# event's menu, but requires the field "controller" to exist in the event in
# order for the workflow action to be available for that event.
[Controller req over time]
display_location = event_menu
fields = controller
label = Requests over last 3 days for $controller$
search.earliest = -3d
search.search_string = sourcetype=rails_app controller=$controller$ | timechart span=1h count
search.target = blank
search.view = charting
type = search
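# For illustration, an event with controller=users would make the action
# above open the charting view and run, over the last 3 days, roughly:
#   sourcetype=rails_app controller=users | timechart span=1h count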
# Enable the admission rules defined in workload_rules.conf.
[search_admission_control]
admission_rules_enabled = 1
# Version 9.1.0.2
# CAUTION: Do not alter the settings in workload_pools.conf unless you know what you are doing.
# Improperly configured workloads may result in splunkd crashes and/or memory overuse.
[general]
enabled = false
default_pool = pool_1
ingest_pool = pool_2
workload_pool_base_dir_name = splunk
[workload_category:search]
cpu_weight = 70
mem_weight = 70
[workload_category:ingest]
cpu_weight = 20
mem_weight = 20
[workload_category:misc]
cpu_weight = 10
mem_weight = 10
[workload_pool:pool_1]
cpu_weight = 40
mem_weight = 40
category = search
default_category_pool = 1
[workload_pool:pool_2]
cpu_weight = 30
mem_weight = 30
category = ingest
default_category_pool = 1
[workload_pool:pool_3]
cpu_weight = 20
mem_weight = 20
category = misc
default_category_pool = 1
[workload_pool:pool_4]
cpu_weight = 10
mem_weight = 10
category = search
default_category_pool = 0
[workload_rules_order]
rules = my_analyst_rule,my_app_rule,my_user_rule,my_index_rule
[workload_rule:my_app_rule]
predicate = app=search
workload_pool = my_app_pool
[workload_rule:my_analyst_rule]
predicate = role=analyst
workload_pool = my_analyst_pool
schedule = always_on
[workload_rule:my_user_rule]
predicate = user=admin
workload_pool = my_user_pool
schedule = always_on
[workload_rule:my_index_rule]
predicate = index=_internal
workload_pool = my_index_pool
schedule = time_range
start_time = 2019-01-01T04:00:00-08:00
end_time = 2019-01-05T04:00:00-08:00
[workload_rule:my_search_type_rule]
predicate = search_type=adhoc
workload_pool = my_adhoc_pool
schedule = every_day
start_time = 10
end_time = 15
[workload_rule:my_logical_rule_1]
predicate = app=search AND (NOT index=_internal)
workload_pool = my_logical_pool_1
schedule = every_week
start_time = 10
end_time = 23
every_week_days = [0,4,6]
[workload_rule:my_logical_rule_2]
predicate = NOT role=power OR user=admin
workload_pool = my_logical_pool_2
schedule = every_month
start_time = 1
end_time = 2
every_month_days = [1,5,16,31]
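# Rules are evaluated in the order given by [workload_rules_order], and the
# first rule whose predicate matches a search assigns its pool. The sketch
# below mirrors that first-match-wins behavior in Python; the lambda
# predicates and the sample search dictionary are simplified stand-ins for
# the real predicate syntax (role=..., app=..., index=...) that splunkd
# evaluates.
rules = [
    ("my_analyst_rule", lambda s: "analyst" in s["roles"],     "my_analyst_pool"),
    ("my_app_rule",     lambda s: s["app"] == "search",        "my_app_pool"),
    ("my_user_rule",    lambda s: s["user"] == "admin",        "my_user_pool"),
    ("my_index_rule",   lambda s: "_internal" in s["indexes"], "my_index_pool"),
]

def assign_pool(search, default_pool="pool_1"):
    for _name, predicate, pool in rules:
        if predicate(search):
            return pool          # first matching rule wins
    return default_pool          # no rule matched: fall back to default_pool

print(assign_pool({"roles": ["user"], "app": "search", "user": "bob", "indexes": []}))
# -> my_app_pool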