Skip to content

Instantly share code, notes, and snippets.

@adamjacobmuller
Created June 11, 2014 04:38
Show Gist options
  • Save adamjacobmuller/56a299f38004a0fa63b5 to your computer and use it in GitHub Desktop.
Save adamjacobmuller/56a299f38004a0fa63b5 to your computer and use it in GitHub Desktop.
[server:main]
# host = /tmp/cdn_node1.sock
proctitle = isprime-proxy
host = 127.0.0.1
port = 5050
processes = 1
cores = 1
listen_queue = 128
home = /Users/adam/.virtualenvs/cdn-node
[app:main]
debug = True
debug.demo = False
debug.print_request = False
proctitle = isprime-proxy
# Application middleware - docs/middleware.md
middleware = cdn_node.middleware.HTTPWorkerProxyMiddleware
cdn_node.middleware.CacheResponseMiddleware
cdn_node.middleware.StaleCheckMiddleware
cdn_node.middleware.QueryIndexMiddleware
cdn_node.middleware.CacheKeyMiddleware
cdn_node.middleware.GeoInformationMiddleware
cdn_node.middleware.ProxyAuthenticationMiddleware
cdn_node.middleware.TokenAuthenticationMiddleware
cdn_node.middleware.AccessControlListMiddleware
cdn_node.middleware.ClientHeaderMiddleware
cdn_node.middleware.ConfigurationMiddleware
cdn_node.middleware.DebugMiddleware
[service:configuration]
api.url = https://api.cdn.isprime.com/?method=services
config.path = /Users/adam/Scripts/apps/cdn-node/conf/nginx.sites.conf
# enables / disables the configuration synchronization with CDN API
api.sync = True
# the `status` and `product` are removed by the ConfigurationSyncService
api.filter.keys = access_groups_name, customerid, created, service_resource, installDate, level3Origin, 0, 1
api.filter.products = CACHING
# redis configuration cache
redis.host = 127.0.0.1
redis.port = 6379
redis.db = 0
redis.hash.configuration = isprime:cdn:config
[service:cache]
proctitle.router = isprime-proxy
proctitle.worker = isprime-worker
cache.router.url = tcp://127.0.0.1:6000
# number of cache worker threads per disk
cache.worker.count = 1
# cache decision based on client/origin headers
cache.decision.client_control = False
cache.decision.client_auth = False
cache.decision.origin_cookie = False
cache.decision.origin_auth = True
resource_index.host = mongodb://127.0.0.1:27017/
resource_index.database = isprime-cdn-database
resource_index.collection = resource-record-collection
# time in seconds when to run the content supervisor
supervisor.interval = 60
# max time to allow stale content to be served during revalidation (seconds)
timeout.stale = 14400
# default max expiration time of cached content if no expiration header set
cache.expiration.default = 86400
# min and max expiration time for cached records
cache.expiration.min = 3600
cache.expiration.max = 31536000
# if no expiration date is set, estimate the expiration time based on
# X% of the time between `Date` and `Last-Modified`
cache.expiration.age_factor = 0.1
cache_path.hot = redis://127.0.0.1:6379/0
# default zone to use for initial cache writes
cache_path.zone.default = sata
cache_path.zone.alternate = ssd
cache_path.ssd = /www/ssd/cache1
/www/ssd/cache2
/www/ssd/cache3
/www/ssd/cache4
/www/ssd/cache5
/www/ssd/cache6
# object size limit in bytes for this zone
# will attempt alternate zone or not cache
cache_path.ssd.size_filter = max
cache_path.ssd.size_limit = 107374182400
cache_path.sata = /www/sata/cache1
/www/sata/cache2
/www/sata/cache3
/www/sata/cache4
/www/sata/cache5
/www/sata/cache6
# object size limit in bytes for this zone
# will attempt alternate zone or not cache
cache_path.sata.size_filter = min
cache_path.sata.size_limit = 2097152
[service:proxy]
# HTTP proxy connection pool
connection.pool = 10
# HTTP proxy timeout
timeout.connection = 5
timeout.network = 5
# chunk size in KiloBytes
chunk_size.read = 16
chunk_size.write = 16
[service:geo]
database = data/GeoLite2-City.mmdb
[service:proxy]
request_header_ignore = QUERY_STRING,
REQUEST_METHOD,
CONTENT_TYPE,
CONTENT_LENGTH,
REQUEST_URI,
PATH_INFO,
DOCUMENT_ROOT,
SERVER_PROTOCOL,
REMOTE_ADDR,
REMOTE_PORT,
SERVER_ADDR,
SERVER_PORT,
SERVER_NAME,
SCRIPT_NAME,
SERVER_SOFTWARE,
HTTP_ACCEPT_ENCODING,
GATEWAY_INTERFACE
##
## logging configuration
[loggers]
keys = root,cdn_node,requests,requests_verbose,root_qual
[handlers]
keys = consoleHandler,systemlog
[formatters]
keys = generic,verbose
[logger_root]
level = WARN
handlers = consoleHandler,systemlog
[logger_cdn_node]
level = INFO
handlers =
qualname = cdn_node
[logger_root_qual]
level = INFO
handlers =
propagate = 0
qualname = root
[logger_requests]
level = WARN
handlers = consoleHandler
qualname = requests.packages.urllib3.connectionpool
[logger_requests_verbose]
level = DEBUG
propagate = 0
handlers = systemlog
qualname = requests.packages.urllib3.connectionpool
[handler_consoleHandler]
class = StreamHandler
level = INFO
formatter = generic
args = (sys.stdout,)
[handler_systemlog]
class = FileHandler
level = DEBUG
formatter = verbose
args = ('system.log',)
[formatter_generic]
format = %(asctime)s %(levelname)-5.5s [%(process)d][%(name)s][%(funcName)s] %(message)s
datefmt =
[formatter_verbose]
format = %(asctime)s %(levelname)-5.5s [%(process)d][%(name)s][%(funcName)s] %(message)s
datefmt =
[formatter_exc_formatter]
format = %(asctime)s %(message)s
[service:proxy]
proctitle = beluga-proxy
# allow to force overwrite the connecting origin server
host_overwrite = isprime.com.fpbns.net
# number of proxy cache worker processes to spawn
worker.processes = 1
# min number of cache worker threads at beginning
worker.cores.min = 10
# max number of cache worker thread
worker.cores.max = 20
# if more than this many cores are busy, a warning is issued
worker.cores.warn = 100
# min loop time in `ms` / heartbeat of the core
# if a request is being processed this time will be larger
worker.heartbeat = 50
# ZMQ address of proxy cache worker
worker.address = ipc:///tmp/isprime.cdn.worker.%s
worker.backend = inproc:///tmp/isprime.cdn.worker.core.%s
worker.backend_tunnel = inproc:///tmp/isprime.cdn.worker.tunnel.%s
# min object size after which to set fadvise flags on
cache.write.fadvise_min = 10485760
# buffer response size in bytes
worker.cache.buffer.size = 16384
worker.cache.force_seek = False
worker.cache.force_truncate = False
worker.cache.object_resize_factor = 10
# connection pool sizes
worker.pool.hosts = 10
worker.pool.http = 100
worker.pool.http.timeout = 10
worker.pool.mongodb = 10
worker.pool.redis = 10
# number of internal attempts made to process a request (i.e. re-play the request)
worker.connection.attempts = 2
# follow HTTP redirects otherwise return status as is
worker.connection.redirects = True
# timeout in seconds for HTTP proxy calls
# combined timeout for serving a request
worker.proxy.timeout = 30
worker.proxy.timeout.connect = 4
worker.proxy.timeout.read = 5
worker.proxy.close_delay = 60
# time in ms after which an accept request is logged as a warning
worker.alert.accept_time = 1000
# list of headers not to forward to the origin server
request_header_ignore = QUERY_STRING,
REQUEST_METHOD,
CONTENT_TYPE,
CONTENT_LENGTH,
REQUEST_URI,
PATH_INFO,
DOCUMENT_ROOT,
SERVER_PROTOCOL,
REMOTE_ADDR,
REMOTE_PORT,
SERVER_ADDR,
SERVER_PORT,
SERVER_NAME,
SCRIPT_NAME,
SERVER_SOFTWARE,
HTTP_ACCEPT_ENCODING,
GATEWAY_INTERFACE
[service:cache]
# cache decision based on client/origin headers
cache.decision.client_control = False
cache.decision.client_auth = False
cache.decision.origin_cookie = False
cache.decision.origin_auth = True
cache.decision.empty_body = False
# resource_index.host = mongodb://64.188.56.225:27017/
resource_index.host = mongodb://127.0.0.1:27017/
resource_index.database = isprime-cdn-database
resource_index.collection = resource-record-collection
# time in seconds when to run the content supervisor
supervisor.interval = 60
# max time to allow stale content to be served during revalidation (seconds)
timeout.stale = 14400
# default max expiration time of cached content if no expiration header set
cache.expiration.default = 86400
# min and max expiration time for cached records
cache.expiration.min = 3600
cache.expiration.max = 31536000
# if no expiration date is set, estimate the expiration time based on
# X% of the time between `Date` and `Last-Modified`
cache.expiration.age_factor = 0.1
cache_path.hot = redis://127.0.0.1:6379/0
# default zone to use for initial cache writes
cache_path.zone.default = sata
cache_path.zone.alternate = ssd
cache_path.ssd = /storage/ssd/1
/storage/ssd/2
/storage/ssd/3
/storage/ssd/4
/storage/ssd/5
/storage/ssd/6
/storage/ssd/7
/storage/ssd/8
/storage/ssd/9
/storage/ssd/10
/storage/ssd/11
/storage/ssd/12
/storage/ssd/13
/storage/ssd/14
/storage/ssd/15
/storage/ssd/16
/storage/ssd/17
/storage/ssd/18
/storage/ssd/19
/storage/ssd/20
/storage/ssd/21
/storage/ssd/22
/storage/ssd/23
/storage/ssd/24
# object size limit in bytes for this zone
# will attempt alternate zone or not cache
cache_path.ssd.size_filter = max
cache_path.ssd.size_limit = 107374182400
cache_path.sata = /storage/sata/1
/storage/sata/2
/storage/sata/3
/storage/sata/4
/storage/sata/5
/storage/sata/6
/storage/sata/7
/storage/sata/8
/storage/sata/9
/storage/sata/10
/storage/sata/11
/storage/sata/12
/storage/sata/13
/storage/sata/14
/storage/sata/15
/storage/sata/16
/storage/sata/17
/storage/sata/18
/storage/sata/19
/storage/sata/20
/storage/sata/21
/storage/sata/22
/storage/sata/23
/storage/sata/24
/storage/sata/25
/storage/sata/26
/storage/sata/27
/storage/sata/28
/storage/sata/29
/storage/sata/30
/storage/sata/31
/storage/sata/32
/storage/sata/33
/storage/sata/34
/storage/sata/35
/storage/sata/36
/storage/sata/37
/storage/sata/38
/storage/sata/39
/storage/sata/40
/storage/sata/41
/storage/sata/42
/storage/sata/43
/storage/sata/44
/storage/sata/45
# object size limit in bytes for this zone
# will attempt alternate zone or not cache
cache_path.sata.size_filter = min
cache_path.sata.size_limit = 2097152
[client:proxy]
# timeout in `ms` for initial HTTP connection call (header information)
connection.open.timeout = 35000
# if no proxy server responds within the `open` timeout,
# re-attempt with another worker process
connection.open.attempts = 2
# number of seconds to keep a worker (addressable) running after `open` call
# to allow for proxy / pass-through `fetch` requests
# co-routines are used (no threads) and the polling of a ZMQ connection
client.proxy.linger_timeout = 60
# chunk size in KB of proxy requests
client.chunk.size_read = 64
# time (ms) to wait for an object to have data available
client.proxy.chunk.timeout = 30000
# number of attempts to request an object after the timeout has expired
client.proxy.chunk.attempts = 1
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment