Skip to content

Instantly share code, notes, and snippets.

@anguslees
Last active January 3, 2020 05:28
Show Gist options
  • Save anguslees/c2254bad4b7f7849b2d9704dc151710e to your computer and use it in GitHub Desktop.
jkcfg 'micro-service' example, rewritten in kubecfg/jsonnet
//
// A reimplementation of the "billing" micro-service example from
// https://github.com/jkcfg/jk/tree/master/examples/kubernetes/micro-service
//
// To use:
// 1. Install https://github.com/bitnami/kubecfg
// 2. Run:
// gist=https://gist.githubusercontent.com/anguslees/c2254bad4b7f7849b2d9704dc151710e/raw/acdda140ea24d688e700b19f84a1f2d30fccb8ac
// kubecfg -U $gist show billing.jsonnet # or other kubecfg subcommands
//
local microservice = import "micro-service.libsonnet";

// Pure-data descriptor for the "billing" micro-service; everything
// below is consumed by micro-service.libsonnet to generate the
// Kubernetes objects.
local billing = {
  name: "billing",
  description: "Provides the /api/billing endpoints for frontend.",
  maintainer: "damien@weave.works",
  namespace: "billing",
  port: 80,
  image: "quay.io/acmecorp/billing:master-fd986f62",

  // HTTP path at which the Ingress exposes the service.
  ingress: {
    path: "/api/billing",
  },

  // Optional extras: generators for Grafana dashboards and Prometheus
  // alert rules, taken from the library's exported collections.
  dashboards: [
    microservice.dashboards.RPSHttp,
  ],
  alerts: [
    microservice.alerts.HighErrorRate,
  ],
};

// The file evaluates to the complete set of Kubernetes objects.
microservice.MicroService(billing)
// Reimplementations of
// https://github.com/jkcfg/jk/blob/master/examples/kubernetes/micro-service/kubernetes.js
// using my favourite other library (kube.libsonnet).
//
// Usually these declarations would all be in separate reusable file(s)
// somewhere.
local kube = import "https://raw.githubusercontent.com/bitnami-labs/kube-libsonnet/96b30825c33b7286894c095be19b7b90687b1ede/kube.libsonnet";
local grafana = import "https://raw.githubusercontent.com/grafana/grafonnet-lib/f3ee1d810858cf556d25f045b53cb0f1fd10b94e/grafonnet/grafana.libsonnet";
local kubecfg = import "kubecfg.libsonnet";
// Namespace object for the service, named after service.namespace.
local Namespace(service) = kube.Namespace(service.namespace);
// Deployment running a single container for the service.
// Relies on kube.libsonnet's hidden "_"-suffixed map fields
// (containers_, ports_) which the library renders into list form.
local Deployment(service) = kube.Deployment(service.name) {
  local app_labels = {
    app: service.name,
    maintainer: service.maintainer,
  },

  metadata+: {
    namespace: service.namespace,
    labels: app_labels,
  },
  spec+: {
    // A null computed field name elides the field, so `replicas` is
    // only emitted when the descriptor actually sets it.
    [if std.objectHas(service, "replicas") then "replicas"]: service.replicas,
    revisionHistoryLimit: 2,
    template+: {
      spec+: {
        containers_+: {
          default: kube.Container(service.name) {
            image: service.image,
            ports_+: {
              port: { containerPort: service.port },
            },
          },
        },
      },
    },
  },
};
// ClusterIP Service selecting the Deployment's pods by the app label.
local Service(service) = kube.Service(service.name) {
  local app_labels = {
    app: service.name,
    maintainer: service.maintainer,
  },

  metadata+: {
    namespace: service.namespace,
    labels: app_labels,
  },
  spec+: {
    selector: { app: service.name },
    ports: [{ port: service.port }],
  },
};
// Ingress routing service.ingress.path to the Service, with the nginx
// rewrite annotation so the backend sees requests rooted at "/".
local Ingress(service) = kube.Ingress(service.name) {
  local app_labels = {
    app: service.name,
    maintainer: service.maintainer,
  },

  metadata+: {
    namespace: service.namespace,
    labels: app_labels,
    annotations: {
      "nginx.ingress.kubernetes.io/rewrite-target": "/",
    },
  },
  spec+: {
    rules: [
      {
        http: {
          paths: [
            {
              path: service.ingress.path,
              backend: {
                serviceName: service.name,
                servicePort: service.port,
              },
            },
          ],
        },
      },
    ],
  },
};
// ConfigMap labelled like the service's other resources.
// `name` is the object name; `data` is the key/value payload.
local ConfigMap(service, name, data) = kube.ConfigMap(name) {
  local app_labels = {
    app: service.name,
    maintainer: service.maintainer,
  },

  metadata+: {
    namespace: service.namespace,
    labels: app_labels,
  },
  data: data,
};
// Prometheus alert-rule generators, keyed by alert name. Each value is
// a function of the service descriptor returning one rule object.
local alerts = {
  // Label selector matching this service's metrics.
  local selector(service) = "job=" + service.name,

  // Ratio of 5xx responses to all requests over a 2m window.
  local ErrorRate(selector) =
    'rate(http_request_total{%(selector)s,code=~"5.."}[%(r)s]) / rate(http_request_duration_seconds_count{%(selector)s}[%(r)s])'
    % { selector: selector, r: "2m" },

  // NOTE(review): the expr carries no threshold comparison (e.g.
  // "> 0.1") although the description claims "more than 10%" — confirm
  // this matches the upstream jk example's intent.
  HighErrorRate(service): {
    alert: "HighErrorRate",
    expr: ErrorRate(selector(service)),
    "for": "5m",  // quoted: `for` is a jsonnet keyword
    labels: { severity: "critical" },
    annotations: {
      service: service.name,
      description: "More than 10%% of requests to the %s service are failing with 5xx errors" % service.name,
      details: '{{$value | printf "%.1f"}}% errors for more than 5m',
    },
  },
};
// Grafana dashboard generators, keyed by dashboard name. Each value is
// a function of the service descriptor returning a grafonnet dashboard.
local dashboards = {
  local selector(service) = "job='%s'" % service.name,

  // Requests-per-second, broken down by HTTP status code.
  local ServiceRPS(selector) =
    "sum by (code)(sum(irate(http_requests_total{%(selector)s}[%(r)s])))"
    % { selector: selector, r: "2m" },

  // [p99, median, mean] latency expressions, scaled to milliseconds.
  local ServiceLatency(selector) =
    local vars = { selector: selector, r: "2m" };
    [
      "histogram_quantile(0.99, sum(rate(http_request_duration_seconds_bucket{%(selector)s}[%(r)s])) by (route)) * 1e3" % vars,
      "histogram_quantile(0.50, sum(rate(http_request_duration_seconds_bucket{%(selector)s}[%(r)s])) by (route)) * 1e3" % vars,
      "sum(rate(http_request_total{%(selector)s}[%(r)s])) / sum(rate(http_request_duration_seconds_count{%(selector)s}[%(r)s])) * 1e3" % vars,
    ],

  // Dashboard with an RPS panel and a latency panel side by side.
  RPSHttp(service):
    // Evaluate the latency expression list once and index into it.
    local latency = ServiceLatency(selector(service));
    grafana.dashboard.new(
      "Service > " + service.name,
    ).addPanel(
      grafana.graphPanel.new(
        "%s RPS" % service.name,
        datasource="$PROMETHEUS_DS",
      ).addTargets([
        grafana.prometheus.target(ServiceRPS(selector(service)), legendFormat="{{code}}"),
      ]),
      { x: 0, y: 0, w: 12, h: 7 },
    ).addPanel(
      grafana.graphPanel.new(
        "%s Latency" % service.name,
        datasource="$PROMETHEUS_DS",
      )
      .addYaxis(format="ms")
      .addYaxis()
      .addTargets([
        grafana.prometheus.target(latency[0], legendFormat="99th percentile"),
        grafana.prometheus.target(latency[1], legendFormat="median"),
        grafana.prometheus.target(latency[2], legendFormat="mean"),
      ]),
      { x: 12, y: 0, w: 12, h: 7 },
    ),
};
// PrometheusRule CRD (monitoring.coreos.com/v1) carrying the service's
// alert rules; the prometheus/role labels are how the "global"
// Prometheus discovers it.
// NOTE(review): unlike the other resources this sets no metadata
// namespace — presumably deliberate so the global Prometheus picks it
// up, but confirm against the prometheus-operator configuration.
local PrometheusRule(service) = kube._Object("monitoring.coreos.com/v1", "PrometheusRule", service.name) {
  metadata+: {
    labels: {
      app: service.name,
      maintainer: service.maintainer,
      prometheus: "global",
      role: "alert-rules",
    },
  },
  spec+: {
    groups: [
      {
        name: service.name + "-alerts.rules",
        // Tolerate descriptors without an `alerts` field by merging a
        // default empty list underneath the descriptor.
        rules: [gen(service) for gen in ({ alerts: [] } + service).alerts],
      },
    ],
  },
};
// Render every dashboard generator named in the descriptor; an absent
// `dashboards` field defaults to the empty list.
local Dashboard(service) =
  local gens = ({ dashboards: [] } + service).dashboards;
  [gen(service) for gen in gens];
{
  // Generator collections exported for descriptors to reference.
  // Hidden (::) so they never appear in rendered manifests.
  alerts:: alerts,
  dashboards:: dashboards,

  // Build the complete set of Kubernetes objects for one descriptor.
  // Every visible field below becomes an emitted object.
  MicroService(service):: {
    // Hidden scaffolding: the rendered dashboards, serialised to a
    // JSON string for embedding as ConfigMap data.
    dashboards:: {
      dashboard_:: Dashboard(service),
      dashboard: kubecfg.manifestJson(self.dashboard_),
    },

    ns: Namespace(service),
    deploy: Deployment(service),
    svc: Service(service),
    ingress: Ingress(service),
    dashboards_cm: ConfigMap(service, service.name + "-dashboards", self.dashboards),
    prometheus_rule: PrometheusRule(service),
  },
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment