#!/usr/bin/env python
import requests

url = "https://testngo-be-rz.femi.com/login"
headers = {'accept': 'application/json, text/plain, */*',
           'accept-language': 'en-US,en;q=0.9,he;q=0.8',
           'authorization': 'Bearer',
           'content-type': 'application/json; charset=UTF-8',
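The preview above is cut off mid-definition. A minimal sketch of how such a login call is typically completed with requests.post follows; the payload field names and credentials are assumptions for illustration, not taken from the gist.

#!/usr/bin/env python
# Hedged completion of the truncated login snippet above.
# The JSON payload keys below are hypothetical; only the URL and headers come from the gist.
import requests

url = "https://testngo-be-rz.femi.com/login"
headers = {'accept': 'application/json, text/plain, */*',
           'content-type': 'application/json; charset=UTF-8'}
payload = {'email': 'user@example.com', 'password': 'secret'}  # hypothetical credentials

resp = requests.post(url, json=payload, headers=headers, timeout=10)
resp.raise_for_status()
print(resp.json())  # a token or session object is typically returned on success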
version: '2'
services:
  proxysql:
    image: perconalab/proxysql
    ports:
      - "3306:3306"
      - "6032:6032"
Dnile / grouting_am.txt
Created May 29, 2016 17:36
prometheus issue #1681
goroutine 37040 [running]:
runtime/pprof.writeGoroutineStacks(0x7efd7412e278, 0xc82182a630, 0x0, 0x0)
/usr/local/go/src/runtime/pprof/pprof.go:516 +0x84
runtime/pprof.writeGoroutine(0x7efd7412e278, 0xc82182a630, 0x2, 0x0, 0x0)
/usr/local/go/src/runtime/pprof/pprof.go:505 +0x46
runtime/pprof.(*Profile).WriteTo(0xfcab60, 0x7efd7412e278, 0xc82182a630, 0x2, 0x0, 0x0)
/usr/local/go/src/runtime/pprof/pprof.go:236 +0xd4
net/http/pprof.handler.ServeHTTP(0xc82489aeb1, 0x9, 0x7efd7414a8e8, 0xc82182a630, 0xc821b54000)
/usr/local/go/src/net/http/pprof/pprof.go:199 +0x37e
net/http/pprof.Index(0x7efd7414a8e8, 0xc82182a630, 0xc821b54000)
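The frames above come out of Go's net/http/pprof handler, so a dump like this can be pulled over HTTP. A minimal sketch, assuming the server exposes pprof on localhost:9090 (host and port are assumptions):

#!/usr/bin/env python
# Fetch a full goroutine stack dump from a Go process that mounts net/http/pprof.
import requests

resp = requests.get(
    "http://localhost:9090/debug/pprof/goroutine",
    params={"debug": 2},  # debug=2 returns full stacks like the trace above
    timeout=10,
)
resp.raise_for_status()
print(resp.text[:2000])  # print the start of the dump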
Dnile / netstat_pen.txt
Created May 27, 2016 21:33
netstat_pen.txt
sudo netstat -pen | grep alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:29050 CLOSE_WAIT 1004 286419317 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:24856 CLOSE_WAIT 1004 286410656 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:28188 CLOSE_WAIT 1004 286414536 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:20016 CLOSE_WAIT 1004 286404398 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:21632 CLOSE_WAIT 1004 286408310 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:19387 CLOSE_WAIT 1004 286406190 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:27235 CLOSE_WAIT 1004 286414449 12457/alertmanager
tcp6 1 0 127.0.0.1:9093 127.0.0.1:26576 CLOSE_WAIT 1004 286414331 12457/alertmanager
tcp6
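The listing shows alertmanager connections piling up in CLOSE_WAIT. A rough sketch for summarising that output by TCP state (the command and process name are taken from the listing; nothing else is):

#!/usr/bin/env python
# Summarise `netstat -pen` output by state to count e.g. CLOSE_WAIT sockets
# held by a single process such as alertmanager. Run with enough privileges
# for -p to resolve PIDs (the gist used sudo).
import subprocess
from collections import Counter

out = subprocess.run(["netstat", "-pen"], capture_output=True, text=True).stdout
states = Counter(
    line.split()[5]  # the State column of `netstat -pen` tcp lines
    for line in out.splitlines()
    if line.startswith("tcp") and "alertmanager" in line
)
print(states)  # e.g. Counter({'CLOSE_WAIT': 8, ...})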
Dnile / ls_l_proc.txt
Created May 27, 2016 21:17
prometheus issue #143
lrwx------ 1 prometheus prometheus 64 May 27 14:16 0 -> /dev/null
lrwx------ 1 prometheus prometheus 64 May 27 14:16 1 -> /dev/pts/3
lrwx------ 1 prometheus prometheus 64 May 27 14:16 10 -> socket:[285801553]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 100 -> socket:[285878671]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 101 -> socket:[285878812]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 102 -> socket:[285877980]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 103 -> socket:[285881817]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 104 -> socket:[285882535]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 105 -> socket:[285882580]
lrwx------ 1 prometheus prometheus 64 May 27 14:16 106 -> socket:[285884665]
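The listing above is the fd directory of the process, almost entirely sockets. A short sketch for counting a process's open descriptors straight from /proc (the PID is a placeholder; run it as root or as the owning user):

#!/usr/bin/env python
# Count how many file descriptors a process holds open, and how many are sockets,
# by reading /proc/<pid>/fd. The PID below is hypothetical.
import os

pid = 12345  # placeholder; substitute the real prometheus/alertmanager PID
fd_dir = f"/proc/{pid}/fd"

fds = os.listdir(fd_dir)
sockets = sum(
    1 for fd in fds if os.readlink(os.path.join(fd_dir, fd)).startswith("socket:")
)
print(f"{len(fds)} open fds, {sockets} of them sockets")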
Dnile / alertmanager.log
Created May 27, 2016 19:15
prometheus, no alerts log
2016/05/27 06:36:09 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:10 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:11 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:12 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:13 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:14 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:15 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:16 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:17 http: Accept error: accept tcp [::]:9093: accept4: too many open files; retrying in 1s
2016/05/27 06:36:18 http: Accept error: accept tcp [::]:9093:
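These accept errors mean the process exhausted its file-descriptor limit. A tiny sketch for comparing a process's own open-descriptor count against that limit (run it inside or alongside the affected process; the output is illustrative):

#!/usr/bin/env python
# Compare the current process's open fds against RLIMIT_NOFILE,
# the limit the "too many open files" errors above ran into.
import os
import resource

soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
open_fds = len(os.listdir("/proc/self/fd"))
print(f"open fds: {open_fds}, soft limit: {soft}, hard limit: {hard}")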
Dnile / alertmanager.yml
Last active May 27, 2016 18:11
alertmanager config
global:
  slack_api_url: <hidden>
  pagerduty_url: "https://events.pagerduty.com/generic/2010-04-15/create_event.json"
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  group_by: ['alertname', 'host']
  group_wait: 30s
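To illustrate what group_by: ['alertname', 'host'] means, here is a rough sketch of the grouping behaviour on a few made-up alerts (the alert data is invented purely for illustration):

#!/usr/bin/env python
# Alertmanager-style grouping: alerts that share the same ('alertname', 'host')
# label values are batched into one notification group.
from collections import defaultdict

alerts = [
    {"alertname": "HighMem", "host": "web-1", "severity": "page"},
    {"alertname": "HighMem", "host": "web-1", "severity": "warn"},
    {"alertname": "HighMem", "host": "web-2", "severity": "page"},
]

groups = defaultdict(list)
for alert in alerts:
    groups[(alert["alertname"], alert["host"])].append(alert)

for key, members in groups.items():
    print(key, "->", len(members), "alert(s) batched together")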
Dnile / mydumper_restore.sh
Last active April 21, 2017 01:55
mydumper load from backups
#!/usr/bin/env sh
# Restore a mydumper backup: recreate each schema, then load the data with myloader.
export DESTHOST=127.0.0.1
export BACKUP_DIR=/vol_mysql/backups
cd $BACKUP_DIR
# Start MySQL without grant tables so the load needs no credentials.
mysqld --skip-grant-tables &
# Recreate each database and apply its schema (pre and post) dumps.
for i in `ls -1 *-schema.dump.gz | cut -d'-' -f1`; do mysql -h $DESTHOST -e "CREATE DATABASE IF NOT EXISTS $i"; zcat $i-schema.dump.gz | mysql -h $DESTHOST $i; zcat $i-schema-post.dump.gz | mysql -h $DESTHOST $i; done
# Load the table data in parallel, then fix ownership.
/usr/bin/myloader --host=$DESTHOST --directory=$BACKUP_DIR --enable-binlog --threads=10 --queries-per-transaction=20 -v 3
chown -R mysql:mysql /var/lib/mysql/
# my global config
global:
  scrape_interval: 15s     # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # Evaluate rules every 15 seconds.
  # scrape_timeout is set to the global default (10s).

  # Attach these extra labels to all timeseries collected by this Prometheus instance.
  external_labels:
    monitor: 'codelab-monitor'
# host running out of memory!
ALERT HighMem
  IF 100 - (node_memory_MemFree + node_memory_Buffers + node_memory_Cached) / node_memory_MemTotal * 100 > 80
  FOR 1m
  WITH {
    severity="page"
  }
  SUMMARY "Instance {{$labels.host}} has high memory consumption"
  DESCRIPTION "{{$labels.host}} of job {{$labels.job}} has had less than 20% of memory available for more than 1 minute."
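As a sanity check on the expression, a short sketch that plugs hypothetical node_exporter values into the same formula (all numbers invented):

#!/usr/bin/env python
# Evaluate the HighMem expression with made-up node_exporter values to show
# when the alert would fire. All figures below are hypothetical.
mem_total = 16 * 1024**3   # node_memory_MemTotal
mem_free = 1 * 1024**3     # node_memory_MemFree
buffers = 0.5 * 1024**3    # node_memory_Buffers
cached = 1.5 * 1024**3     # node_memory_Cached

used_pct = 100 - (mem_free + buffers + cached) / mem_total * 100
print(f"used: {used_pct:.1f}%")            # 81.2% with the numbers above
print("fires" if used_pct > 80 else "ok")  # > 80 for 1m => HighMem fires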