Skip to content

Instantly share code, notes, and snippets.

@karmi
Last active November 2, 2018 07:48
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save karmi/bf86cccea7e99778808f05d95334b0ef to your computer and use it in GitHub Desktop.
# Setup
#
# Create the directory Nginx will write its logs into, then show the layout.
mkdir -p ./tmp/nginx/log/
tree -L 1 ./tmp
# Install and run Nginx
#
# 1. Homebrew (brew install nginx)
#
# Run Nginx in the foreground with this directory as its prefix, so the
# relative log paths in nginx.conf resolve to ./tmp/nginx/log/.
# NOTE: paths are quoted so a $PWD containing spaces does not break the command.
nginx -p "$PWD/tmp/nginx" -c "$PWD/nginx.conf"
#
# 2. Docker
#
# Mount the local config (read-only) and the log directory into the container;
# --rm removes the container on exit. $PWD is quoted for the same reason.
docker run \
--name nginx \
--publish 8080:8080 \
--volume "$PWD/nginx.conf:/etc/nginx/nginx.conf:ro" \
--volume "$PWD/tmp/nginx/log/:/etc/nginx/log/" \
--rm \
nginx:1.15
# -----------------------------------------------------------------------------
# Download packages for Mac OS X
#
# Run curl directly in a subshell instead of wrapping it in `echo $( ... )`:
# the command substitution captured curl's output and suppressed the -#
# progress meter while adding nothing.
( cd tmp && curl -O -# https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.4.2.tar.gz )
( cd tmp && curl -O -# https://artifacts.elastic.co/downloads/kibana/kibana-6.4.2-darwin-x86_64.tar.gz )
( cd tmp && curl -O -# https://artifacts.elastic.co/downloads/beats/filebeat/filebeat-6.4.2-darwin-x86_64.tar.gz )
( cd tmp && curl -O -# https://artifacts.elastic.co/downloads/beats/metricbeat/metricbeat-6.4.2-darwin-x86_64.tar.gz )
( cd tmp && curl -O -# https://artifacts.elastic.co/downloads/beats/heartbeat/heartbeat-6.4.2-darwin-x86_64.tar.gz )
( cd tmp && curl -O https://artifacts.elastic.co/downloads/elasticsearch-plugins/ingest-geoip/ingest-geoip-6.4.2.zip )
( cd tmp && curl -O https://artifacts.elastic.co/downloads/elasticsearch-plugins/ingest-user-agent/ingest-user-agent-6.4.2.zip )
# Unpack every downloaded tarball; "$f" is quoted so unusual filenames are safe.
( cd tmp && for f in *.tar.gz; do tar xf "$f"; done )
# Install Elasticsearch plugins
#
# Install the ingest plugins from the downloaded zips; the file:// URLs are
# quoted so the commands survive a $PWD containing spaces.
./tmp/elasticsearch-6.4.2/bin/elasticsearch-plugin install --batch "file://$PWD/tmp/ingest-geoip-6.4.2.zip"
./tmp/elasticsearch-6.4.2/bin/elasticsearch-plugin install --batch "file://$PWD/tmp/ingest-user-agent-6.4.2.zip"
# Start Elasticsearch
#
# Install the demo config, then launch Elasticsearch in the foreground
# (blocks this terminal; use a new shell for the next steps).
cp ./elasticsearch.yml ./tmp/elasticsearch-6.4.2/config/elasticsearch.yml
./tmp/elasticsearch-6.4.2/bin/elasticsearch
# Start Kibana
#
# Install the demo config, then launch Kibana in the foreground.
cp kibana.yml ./tmp/kibana-6.4.2-darwin-x86_64/config/kibana.yml
./tmp/kibana-6.4.2-darwin-x86_64/bin/kibana
# Run filebeat against `/tmp/nginx/log/`
#
# `setup` loads Filebeat's index template, pipelines and dashboards into the
# stack; `run` then starts shipping the Nginx logs (paths come from filebeat.yml).
./tmp/filebeat-6.4.2-darwin-x86_64/filebeat -v -e -c filebeat.yml setup
./tmp/filebeat-6.4.2-darwin-x86_64/filebeat -v -e -c filebeat.yml run
# Drive some traffic to Nginx...
#
# $(...) replaces the deprecated backtick form of command substitution.
# 1000 GETs to the OK endpoint and 250 POSTs to the failing /400 endpoint.
for i in $(seq 1 1000); do curl http://localhost:8080/; sleep 0.25; done
for i in $(seq 1 250); do curl -X POST http://localhost:8080/400; sleep 0.1; done
# ... or use eg. Siege (brew install siege):
#
siege --concurrent=1 --delay=0.5 --time=5m --internet --file=siege.urls
# Look at the data in the Kibana "Discover" tab
# Open the Filebeat Nginx dashboard in Kibana
# Explore the data in detail
#
# Generate one request, show the raw Nginx log line it produced, then fetch
# the latest enriched document from Elasticsearch for comparison.
curl http://localhost:8080/test
tail -n 1 tmp/nginx/log/access.log
curl -s 'http://localhost:9200/filebeat*/_search?size=1&sort=offset:desc&pretty' | jq '.hits.hits[0]._source'
# Explore the ingest pipeline for Nginx
#
curl -s 'http://localhost:9200/_ingest/pipeline/filebeat-6.4.2-nginx-access-default?pretty' | jq '.'
# Run custom aggregation on the Nginx data
#
# Terms aggregation over the HTTP method field; size=0 suppresses the hits
# themselves so only the aggregation buckets come back.
curl -s -H 'Content-Type: application/json' 'http://localhost:9200/filebeat*/_search?pretty' -d '
{
"size" : 0,
"aggregations" : {
"http_methods" : {
"terms" : {
"field" : "nginx.access.method"
}
}
}
}
' | jq '.aggregations.http_methods.buckets'
# Run Metricbeat
#
# As with Filebeat: `setup` loads templates/dashboards, `run` starts collecting.
./tmp/metricbeat-6.4.2-darwin-x86_64/metricbeat -e -c metricbeat.yml setup
./tmp/metricbeat-6.4.2-darwin-x86_64/metricbeat -e -c metricbeat.yml run
# Run Heartbeat
#
./tmp/heartbeat-6.4.2-darwin-x86_64/heartbeat -e -c heartbeat.yml setup
./tmp/heartbeat-6.4.2-darwin-x86_64/heartbeat -e -c heartbeat.yml run
# Activate "Platinum" license trial for SQL and Watcher
#
curl -X POST 'http://localhost:9200/_xpack/license/start_trial?acknowledge=true&pretty'
# Run the aggregation as SQL
#
# Same HTTP-method breakdown as the aggregation above, via the X-Pack SQL API.
curl -H 'Content-Type: application/json' 'http://localhost:9200/_xpack/sql?format=txt' -d '
{
"query" : "SELECT nginx.access.method AS method, COUNT(*) FROM filebeat* GROUP BY nginx.access.method"
}
'
# Create an alert for Nginx 5xx errors
#
# The watch definition is read from watcher-nginx-5xx-errors.json (see below).
curl -X PUT -H 'Content-Type: application/json' 'http://localhost:9200/_xpack/watcher/watch/nginx-5xx-errors' -d @watcher-nginx-5xx-errors.json
# Elasticsearch configuration for the demo.
# NOTE(review): the paste lost all YAML indentation; nesting below is
# reconstructed from the key names — confirm against the original gist.
cluster:
  name: elastic-stack-demo
  # Keep the demo running even on a nearly-full disk.
  routing.allocation.disk.threshold_enabled: false
# Email account used by the Watcher "send_email" action.
xpack.notification.email.account:
  default:
    profile: gmail
    smtp:
      auth: true
      starttls.enable: true
      host: smtp.gmail.com
      port: 587
      # Credentials come from the environment, not from this file.
      user: ${GMAIL_USERNAME}
      password: ${GMAIL_PASSWORD}
# Filebeat configuration: ship the demo Nginx logs.
# NOTE(review): indentation reconstructed — the paste flattened the YAML.
filebeat.modules:
  - module: nginx
    # Point the module at the logs written under ./tmp/nginx/log/.
    access:
      var.paths: ["${PWD}/tmp/nginx/log/access.log*"]
    error:
      var.paths: ["${PWD}/tmp/nginx/log/error.log*"]
output.elasticsearch:
  hosts: ["localhost"]
xpack.monitoring.enabled: true
# Heartbeat configuration: uptime checks for the demo services.
# NOTE(review): indentation reconstructed — the paste flattened the YAML.
heartbeat.monitors:
  # Ping Elasticsearch every 5 seconds and expect HTTP 200.
  - type: http
    schedule: '@every 5s'
    urls: ["http://localhost:9200"]
    check.request:
      method: "GET"
    check.response:
      status: 200
    tags: ["elasticsearch"]
  # Ping Nginx every 5 seconds and expect HTTP 200.
  - type: http
    schedule: '@every 5s'
    urls: ["http://localhost:8080"]
    check.request:
      method: "GET"
    check.response:
      status: 200
    tags: ["nginx"]
output.elasticsearch:
  hosts: ["http://localhost:9200"]
setup.kibana.host: "http://localhost:5601"
xpack.monitoring.enabled: true
elasticsearch.url: http://localhost:9200
# Metricbeat configuration: host-level and Nginx metrics.
# NOTE(review): indentation reconstructed — the paste flattened the YAML.
metricbeat.modules:
  # Core system metrics from the local machine.
  - module: system
    metricsets:
      - cpu
      - load
      - memory
      - network
      - process
      - process_summary
  # Nginx metrics scraped from the stub_status endpoint (/server-status).
  - module: nginx
    enabled: true
    metricsets: ["stubstatus"]
    hosts: ["http://localhost:8080"]
  # Uncomment to collect Docker daemon metrics as well.
  # - module: docker
  #   metricsets:
  #     - "container"
  #     - "cpu"
  #     - "diskio"
  #     - "healthcheck"
  #     - "info"
  #     - "image"
  #     - "memory"
  #     - "network"
  #   hosts: ["unix:///var/run/docker.sock"]
output.elasticsearch:
  hosts: ["localhost"]
# Minimal Nginx configuration for the Elastic Stack demo.
events {
worker_connections 1024;
}
http {
# Demo server on port 8080; log paths are relative to the prefix (nginx -p).
server {
listen 8080;
access_log log/access.log combined;
error_log log/error.log error;
default_type text/plain;
# Healthy endpoint.
location / {
return 200 'Hello World\n';
}
# Endpoints that always fail, for generating 4xx/5xx log entries.
location /400 {
return 400 'Bad Request\n';
}
location /500 {
return 500 'Server Error\n';
}
# Status endpoint scraped by the Metricbeat nginx module (stubstatus).
location /server-status {
stub_status on;
access_log off;
allow 127.0.0.1;
deny all; # Comment out when running in Docker
}
# Keep browser favicon requests out of the access log.
location = /favicon.ico {
access_log off;
return 204;
}
}
}
http://localhost:8080/
http://localhost:8080?foo=bar
http://localhost:8080/400
http://localhost:8080/500
http://localhost:8080/500 POST
http://localhost:8080/400 POST
http://localhost:8080/400 POST foo=bar
{
  "trigger": {
    "schedule": {
      "interval": "60s"
    }
  },
  "input": {
    "search": {
      "request": {
        "indices": ["filebeat-*"],
        "body": {
          "size": 0,
          "query": {
            "bool": {
              "must": [
                {
                  "range": {
                    "nginx.access.response_code": {
                      "gte": 500
                    }
                  }
                },
                {
                  "range": {
                    "@timestamp": {
                      "from": "{{ctx.trigger.scheduled_time}}||-60s",
                      "to": "{{ctx.trigger.triggered_time}}"
                    }
                  }
                }
              ]
            }
          },
          "aggregations": {
            "top_urls": {
              "terms": {
                "field": "nginx.access.url"
              }
            }
          }
        }
      }
    }
  },
  "condition": {
    "compare": {
      "ctx.payload.hits.total": {
        "gt": 0
      }
    }
  },
  "actions": {
    "debug": {
      "logging": {
        "level": "info",
        "text": "ALERT | There are [{{ctx.payload.hits.total}}] Nginx 50x errors in the last minute."
      }
    },
    "send_email": {
      "transform": {
        "script": {
          "lang": "painless",
          "source": "[ 'total': ctx.payload.hits.total, 'top_urls': ctx.payload.aggregations.top_urls.buckets.collect(bucket -> [ 'url': bucket.key, 'errors': bucket.doc_count ]) ]"
        }
      },
      "email": {
        "account": "default",
        "from": "alerts@example.org",
        "to": "watcherdemo@mailinator.com",
        "subject": "[watcher] Nginx 500",
        "body": "There are [{{ctx.payload.total}}] Nginx 50x Errors in the last minute.\n\nTop URLs:\n\n{{#ctx.payload.top_urls}}* {{url}} ({{errors}} errors)\n{{/ctx.payload.top_urls}}\n\nMore info: http://localhost:5601/goto/<REPLACE>"
      }
    }
  }
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment