@dpb587
Created January 29, 2015 14:28
package main

// ignore checks which are currently okay

import (
    "os"

    "github.com/dpb587/nocommit-experiments/healthchecks/check"
    "github.com/dpb587/nocommit-experiments/healthchecks/handler"
)
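
// mhandle re-emits only the check statuses that are not OKAY, dropping
// everything else so downstream consumers only see problems.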
func mhandle(cs check.Status, emit handler.Emitter) {
    if cs.Check.GetStatus() != check.CHECK_OKAY {
        emit(cs)
    }
}
func main() {
    v := handler.Handler{os.Stdin, mhandle}
    v.Run()
}
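
// A rough sketch of the intended pipeline (the binary names are hypothetical;
// they are not defined anywhere in this gist). The program above reads
// check.Status values on stdin (presumably one JSON document per line,
// matching what the other two programs print) and only passes along the ones
// whose status is not OKAY:
//
//   ./check-diskusage -es localhost:9200 | ./filter-not-okay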
package main

// emit all the active disks, including their usage and free space
//
// {
//   "check": {
//     "name": "disk_usage/persistent",
//     "owner": "bosh-cityindex-logsearch-io/cityindex.logsearch.io/api/0",
//     "status": "OKAY"
//   },
//   "check_data": {
//     "extra": {
//       "free_bytes": 12986310656.0,
//       "used_bytes": 2908782592.0
//     },
//     "threshold": 70,
//     "units": "percent",
//     "value": 18.29987749437077
//   }
// }
//
import (
    "encoding/json"
    "errors"
    "flag"
    "fmt"
    "io/ioutil"
    "net/http"
    "strconv"
    "strings"
    "time"

    "github.com/dpb587/nocommit-experiments/healthchecks/check"
    "github.com/dpb587/nocommit-experiments/healthchecks/handlers/logsearch-shipper-diskusage"
)
var flagElasticsearch string
var flagEphemeral string
var flagPersistent string
var flagSystem string

func init() {
    flag.StringVar(&flagElasticsearch, "es", "localhost:9200", "Elasticsearch endpoint")
    flag.StringVar(&flagEphemeral, "ephemeral", "90.00", "Ephemeral Usage Threshold")
    flag.StringVar(&flagSystem, "system", "90.00", "System Usage Threshold")
    flag.StringVar(&flagPersistent, "persistent", "80.00", "Persistent Usage Threshold")
}
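
// Example invocation (the file name is hypothetical; it is not part of the
// gist, and the thresholds are percentages):
//
//   go run check-diskusage.go -es logs.example.com:9200 -system 90.00 -ephemeral 90.00 -persistent 80.00
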
func main() {
    flag.Parse()

    // ignoring err :(
    flagEphemeralFloat, _ := strconv.ParseFloat(flagEphemeral, 64)
    flagSystemFloat, _ := strconv.ParseFloat(flagSystem, 64)
    flagPersistentFloat, _ := strconv.ParseFloat(flagPersistent, 64)

    s := strings.NewReader(ESDATA)

    resp, err := http.Post(
        fmt.Sprintf("http://%s/logstash-%s/metric/_search", flagElasticsearch, time.Now().Format("2006.01.02")),
        "application/json",
        s,
    )
    if err != nil {
        panic(err)
    }

    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }

    var esres map[string]interface{}

    err = json.Unmarshal(body, &esres)
    if err != nil {
        panic(err)
    }

    // walk the nested director > deployment > job buckets of the aggregation response
    for _, director := range esres["aggregations"].(map[string]interface{})["director"].(map[string]interface{})["buckets"].([]interface{}) {
        directormap := director.(map[string]interface{})

        for _, deployment := range directormap["deployment"].(map[string]interface{})["buckets"].([]interface{}) {
            deploymentmap := deployment.(map[string]interface{})

            for _, job := range deploymentmap["job"].(map[string]interface{})["buckets"].([]interface{}) {
                jobmap := job.(map[string]interface{})

                owner := fmt.Sprintf("%s/%s/%s", directormap["key"], deploymentmap["key"], jobmap["key"])

                generateCheckStatus(owner, "system", extractDiskUsage(jobmap, "system"), flagSystemFloat)
                generateCheckStatus(owner, "ephemeral", extractDiskUsage(jobmap, "ephemeral"), flagEphemeralFloat)
                generateCheckStatus(owner, "persistent", extractDiskUsage(jobmap, "persistent"), flagPersistentFloat)
            }
        }
    }
}
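
// generateCheckStatus prints a check.Status JSON document for one disk,
// marking it FAIL once the used percentage reaches the given threshold.
// Disks with no data in Elasticsearch (e.g. a job without a persistent disk)
// are skipped silently.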
func generateCheckStatus(owner string, disk string, du logsearchshipperdiskusage.DiskUsage, threshold float64) {
    if du.IsMissingData() {
        return
    }

    value := du.GetUsedPct()

    cs := check.CHECK_FAIL

    if value < threshold {
        cs = check.CHECK_OKAY
    }

    mc := check.Status{
        Check: check.Check{
            Owner:  owner,
            Name:   fmt.Sprintf("disk_usage/%s", disk),
            Status: cs,
        },
        CheckData: check.CheckData{
            Threshold: threshold,
            Value:     value,
            Units:     "percent",
            Extra: map[string]float64{
                "free_bytes": du.GetFree(),
                "used_bytes": du.GetUsed(),
            },
        },
    }

    mcjs, err := json.Marshal(mc)
    if err != nil {
        panic(err)
    }

    fmt.Println(string(mcjs))
}
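
// extractDiskUsage pulls the latest "<name>_used" and "<name>_free" top-hit
// values out of a job bucket; if either is missing, the returned DiskUsage is
// flagged as having missing data.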
func extractDiskUsage(d map[string]interface{}, name string) (du logsearchshipperdiskusage.DiskUsage) {
    used, err1 := extractTopHitValue(d[name+"_used"])
    free, err2 := extractTopHitValue(d[name+"_free"])

    return logsearchshipperdiskusage.DiskUsage{err1 != nil || err2 != nil, used, free}
}
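
// extractTopHitValue digs the single top_hits document out of a filtered
// sub-aggregation and returns its "value" field, or an error when the
// aggregation matched no documents.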
func extractTopHitValue(d interface{}) (val float64, err error) {
    sd := d.(map[string]interface{})["value"].(map[string]interface{})["hits"].(map[string]interface{})["hits"].([]interface{})

    if len(sd) == 0 {
        err = errors.New("No top hit available")

        return
    }

    val = sd[0].(map[string]interface{})["_source"].(map[string]interface{})["value"].(float64)

    return
}
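
// ESDATA is the Elasticsearch request body: terms aggregations per BOSH
// director, deployment, and job, each containing filtered top_hits
// sub-aggregations that return the most recent free/used byte values for the
// system (xvda1), ephemeral (xvdb2), and persistent (xvdf1) disks.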
const ESDATA string = `
{
"aggregations" : {
"director" : {
"terms" : {
"field" : "@source.bosh_director",
"size" : 0,
"order" : {
"_term" : "asc"
}
},
"aggregations" : {
"deployment" : {
"terms" : {
"field" : "@source.bosh_deployment",
"size" : 0,
"order" : {
"_term" : "asc"
}
},
"aggregations" : {
"job" : {
"terms" : {
"field" : "@source.bosh_job",
"size" : 0,
"order" : {
"_term" : "asc"
}
},
"aggregations" : {
"system_free" : {
"filter" : {
"term" : {
"name" : "host.df_xvda1.df_complex_free"
}
},
"aggregations" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : "desc"
},
"size" : 1,
"_source" : [
"value"
]
}
}
}
},
"system_used" : {
"filter" : {
"term" : {
"name" : "host.df_xvda1.df_complex_used"
}
},
"aggregations" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : "desc"
},
"size" : 1,
"_source" : [
"value"
]
}
}
}
},
"ephemeral_free" : {
"filter" : {
"term" : {
"name" : "host.df_xvdb2.df_complex_free"
}
},
"aggregations" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : "desc"
},
"size" : 1,
"_source" : [
"value"
]
}
}
}
},
"ephemeral_used" : {
"filter" : {
"term" : {
"name" : "host.df_xvdb2.df_complex_used"
}
},
"aggregations" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : "desc"
},
"size" : 1,
"_source" : [
"value"
]
}
}
}
},
"persistent_free" : {
"filter" : {
"term" : {
"name" : "host.df_xvdf1.df_complex_free"
}
},
"aggregations" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : "desc"
},
"size" : 1,
"_source" : [
"value"
]
}
}
}
},
"persistent_used" : {
"filter" : {
"term" : {
"name" : "host.df_xvdf1.df_complex_used"
}
},
"aggregations" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : "desc"
},
"size" : 1,
"_source" : [
"value"
]
}
}
}
}
}
}
}
}
}
}
},
"size" : 0
}
`
package main

// emit all the installed monit services on bosh machines, reporting whether they're running
//
// {
//   "check": {
//     "name": "monit_status/elasticsearch",
//     "owner": "bosh-cityindex-logsearch-io/cityindex.logsearch.io/elasticsearch_eu-west-1b/0",
//     "status": "OKAY"
//   },
//   "check_data": {
//     "threshold": 1,
//     "units": "boolean",
//     "value": 0
//   }
// }
//
import (
    "encoding/json"
    "errors"
    "flag"
    "fmt"
    "io/ioutil"
    "net/http"
    "regexp"
    "strings"
    "time"

    "github.com/dpb587/nocommit-experiments/healthchecks/check"
)
var flagElasticsearch string

func init() {
    flag.StringVar(&flagElasticsearch, "es", "localhost:9200", "Elasticsearch endpoint")
}
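
// Example invocation (the file name is hypothetical; it is not part of the gist):
//
//   go run check-monit.go -es logs.example.com:9200
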
func main() {
    flag.Parse()

    s := strings.NewReader(ESDATA)

    resp, err := http.Post(
        fmt.Sprintf("http://%s/logstash-%s/metric/_search", flagElasticsearch, time.Now().Format("2006.01.02")),
        "application/json",
        s,
    )
    if err != nil {
        panic(err)
    }

    defer resp.Body.Close()

    body, err := ioutil.ReadAll(resp.Body)
    if err != nil {
        panic(err)
    }

    var esres map[string]interface{}

    err = json.Unmarshal(body, &esres)
    if err != nil {
        panic(err)
    }

    // e.g. "monit.elasticsearch.status" -> "elasticsearch"
    rem := regexp.MustCompile(`monit\.([^.]+)\.status`)

    // walk the nested director > deployment > job > service buckets of the aggregation response
    for _, director := range esres["aggregations"].(map[string]interface{})["filtered"].(map[string]interface{})["director"].(map[string]interface{})["buckets"].([]interface{}) {
        directormap := director.(map[string]interface{})

        for _, deployment := range directormap["deployment"].(map[string]interface{})["buckets"].([]interface{}) {
            deploymentmap := deployment.(map[string]interface{})

            for _, job := range deploymentmap["job"].(map[string]interface{})["buckets"].([]interface{}) {
                jobmap := job.(map[string]interface{})

                for _, service := range jobmap["service"].(map[string]interface{})["buckets"].([]interface{}) {
                    metricmap := service.(map[string]interface{})

                    value, err := extractTopHitValue(metricmap)
                    if err != nil {
                        panic(err)
                    }

                    owner := fmt.Sprintf("%s/%s/%s", directormap["key"], deploymentmap["key"], jobmap["key"])
                    srvname := rem.ReplaceAllString(metricmap["key"].(string), "$1")

                    // a value of 0 is treated as the service running okay
                    cs := check.CHECK_FAIL

                    if value == 0 {
                        cs = check.CHECK_OKAY
                    }

                    mc := check.Status{
                        Check: check.Check{
                            Owner:  owner,
                            Name:   fmt.Sprintf("monit_status/%s", srvname),
                            Status: cs,
                        },
                        CheckData: check.CheckData{
                            Threshold: 1,
                            Value:     value,
                            Units:     "boolean",
                        },
                    }

                    mcjs, err := json.Marshal(mc)
                    if err != nil {
                        panic(err)
                    }

                    fmt.Println(string(mcjs))
                }
            }
        }
    }
}
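
// extractTopHitValue digs the single top_hits document out of a sub-aggregation
// and returns its "value" field, or an error when no documents matched.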
func extractTopHitValue(d interface{}) (val float64, err error) {
    sd := d.(map[string]interface{})["value"].(map[string]interface{})["hits"].(map[string]interface{})["hits"].([]interface{})

    if len(sd) == 0 {
        err = errors.New("No top hit available")

        return
    }

    val = sd[0].(map[string]interface{})["_source"].(map[string]interface{})["value"].(float64)

    return
}
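
// ESDATA is the Elasticsearch request body: documents are first filtered to
// monit.*.status metric names, then aggregated per BOSH director, deployment,
// job, and service, with a top_hits sub-aggregation returning the most recent
// status value for each service.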
const ESDATA string = `
{
"size" : 0,
"aggs" : {
"filtered" : {
"filter" : {
"regexp" : {
"name" : "monit\\..*\\.status"
}
},
"aggs" : {
"director" : {
"terms" : {
"field" : "@source.bosh_director",
"size" : 250
},
"aggs" : {
"deployment" : {
"terms" : {
"field" : "@source.bosh_deployment",
"size" : 250
},
"aggs" : {
"job" : {
"terms" : {
"field" : "@source.bosh_job",
"size" : 250
},
"aggs" : {
"service" : {
"terms" : {
"field" : "name",
"size" : 100
},
"aggs" : {
"value" : {
"top_hits" : {
"sort" : {
"@timestamp" : {
"order" : "desc"
},
"name" : {
"order" : "desc"
}
},
"_source" : {
"include" : [
"value"
]
},
"size" : 1
}
}
}
}
}
}
}
}
}
}
}
}
}
}
`