@noahcrowley
Last active September 5, 2018 01:02
OpenCensus Quickstart with Prometheus Exporter
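
// Command demo instruments a simple read-evaluate-print loop (REPL) with
// OpenCensus stats and exposes the aggregated metrics to Prometheus at
// http://localhost:9091/metrics.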
package main

import (
	"bufio"
	"bytes"
	"context"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"time"

	"go.opencensus.io/exporter/prometheus"
	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)
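
// The measures: raw data points recorded on every pass through the REPL.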
var (
	// The latency in milliseconds
	MLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", "ms")

	// Counts the number of lines read in from standard input
	MLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", "1")

	// Counts the number of non-EOF (end-of-file) errors encountered.
	MErrors = stats.Int64("repl/errors", "The number of errors encountered", "1")

	// Counts/groups the lengths of lines read in.
	MLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", "By")
)
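
// KeyMethod is a tag key used to annotate each recorded measurement with the
// name of the method doing the recording.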
var (
	KeyMethod, _ = tag.NewKey("method")
)

var (
	LatencyView = &view.View{
		Name:        "demo/latency",
		Measure:     MLatencyMs,
		Description: "The distribution of the latencies",

		// Latency in buckets:
		// [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s]
		Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000),
		TagKeys:     []tag.Key{KeyMethod},
	}

	LineCountView = &view.View{
		Name:        "demo/lines_in",
		Measure:     MLinesIn,
		Description: "The number of lines from standard input",
		Aggregation: view.Count(),
	}

	ErrorCountView = &view.View{
		Name:        "demo/errors",
		Measure:     MErrors,
		Description: "The number of errors encountered",
		Aggregation: view.Count(),
	}

	LineLengthView = &view.View{
		Name:        "demo/line_lengths",
		Description: "Groups the lengths of lines in buckets",
		Measure:     MLineLengths,

		// Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80B, >=100B, >=200B, >=400B, >=600B, >=800B, >=1000B]
		Aggregation: view.Distribution(0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000),
	}
)
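
// main sets up the Prometheus exporter, registers it together with the views
// above, serves the scrape endpoint on port 9091, and then runs the REPL.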
func main() {
	// Create the Prometheus stats exporter, prefixing exported metrics with the "demo" namespace.
	prometheus, err := prometheus.NewExporter(prometheus.Options{Namespace: "demo"})
	if err != nil {
		log.Fatal(err)
	}

	// Register the stats exporter
	view.RegisterExporter(prometheus)

	// Register the views
	if err := view.Register(LatencyView, LineCountView, ErrorCountView, LineLengthView); err != nil {
		log.Fatalf("Failed to register views: %v", err)
	}

	// In a separate goroutine, run the Prometheus metrics scraping handler
	go func() {
		http.Handle("/metrics", prometheus)
		log.Fatal(http.ListenAndServe(":9091", nil))
	}()

	// In a REPL:
	//   1. Read input
	//   2. Process input
	br := bufio.NewReader(os.Stdin)

	// repl is the read, evaluate, print loop
	for {
		if err := readEvaluateProcess(br); err != nil {
			if err == io.EOF {
				return
			}
			log.Fatal(err)
		}
	}
}

// readEvaluateProcess reads a line from the input reader and
// then processes it. It returns an error if any was encountered.
func readEvaluateProcess(br *bufio.Reader) error {
	ctx, err := tag.New(context.Background(), tag.Insert(KeyMethod, "repl"))
	if err != nil {
		return err
	}

	fmt.Printf("> ")
	line, _, err := br.ReadLine()
	if err != nil {
		if err != io.EOF {
			stats.Record(ctx, MErrors.M(1))
		}
		return err
	}

	out, err := processLine(ctx, line)
	if err != nil {
		stats.Record(ctx, MErrors.M(1))
		return err
	}
	fmt.Printf("< %s\n\n", out)
	return nil
}

// processLine takes in a line of text and
// transforms it. Currently it just upper-cases it.
func processLine(ctx context.Context, in []byte) (out []byte, err error) {
	startTime := time.Now()
	defer func() {
		ms := float64(time.Since(startTime).Nanoseconds()) / 1e6
		stats.Record(ctx, MLinesIn.M(1), MLatencyMs.M(ms), MLineLengths.M(int64(len(in))))
	}()

	return bytes.ToUpper(in), nil
}
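
Once the REPL is running, the exporter serves the aggregated view data in Prometheus exposition format. A minimal sketch for checking the endpoint from a separate process (assuming the program above is running locally on the port it listens on, 9091):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Fetch the text-format metrics served by the OpenCensus Prometheus exporter.
	resp, err := http.Get("http://localhost:9091/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Print the raw exposition output; the registered views (latency, lines_in,
	// errors, line_lengths) appear here once measurements have been recorded.
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", body)
}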