// Modified version of:
// https://github.com/coreos/prometheus-operator/blob/master/contrib/kube-prometheus/example.jsonnet
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet'; // https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k.libsonnet - imports k8s.libsonnet
// * https://github.com/ksonnet/ksonnet-lib/blob/master/ksonnet.beta.3/k8s.libsonnet defines things such as "persistentVolumeClaim:: {"
//
local pvc = k.core.v1.persistentVolumeClaim; // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#persistentvolumeclaim-v1-core (defines variable named 'spec' of type 'PersistentVolumeClaimSpec')
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') +
  (import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet') {
    _config+:: {
      namespace: 'monitoring',
      prometheus+:: {
        namespaces: ["default", "kube-system", "monitoring", "rook-ceph", "jenkins-slaves", "jenkins"],
      },
    },
    alertmanager+:: {
      alertmanager+: {
        spec+: {
          replicas: 1,
        },
      },
    },
    prometheus+:: {
      prometheus+: {
        spec+: {  // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheusspec
          replicas: 1,
          // If a value isn't specified for 'retention', then by default the '--storage.tsdb.retention=24h' arg will be passed to prometheus by prometheus-operator.
          // The possible values for a prometheus <duration> are:
          // * https://github.com/prometheus/common/blob/c7de230/model/time.go#L178 specifies "^([0-9]+)(y|w|d|h|m|s|ms)$" (years weeks days hours minutes seconds milliseconds)
          retention: '30d',
          // Reference info: https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md
          // By default (if the following 'storage.volumeClaimTemplate' isn't created), prometheus will be created with an EmptyDir for the 'prometheus-k8s-db' volume (for the prom tsdb).
          // This 'storage.volumeClaimTemplate' causes the following to be automatically created (via dynamic provisioning) for each prometheus pod:
          // * PersistentVolumeClaim (and a corresponding PersistentVolume)
          // * the actual volume (per the StorageClassName specified below)
          storage: {  // https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#storagespec
            volumeClaimTemplate:  // (same link as above where the 'pvc' variable is defined)
              pvc.new() +  // http://g.bryan.dev.hepti.center/core/v1/persistentVolumeClaim/#core.v1.persistentVolumeClaim.new
              pvc.mixin.spec.withAccessModes('ReadWriteOnce') +
              // https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.11/#resourcerequirements-v1-core (defines 'requests'),
              // and https://kubernetes.io/docs/concepts/policy/resource-quotas/#storage-resource-quota (defines 'requests.storage')
              pvc.mixin.spec.resources.withRequests({ storage: '10Gi' }) +
              // A StorageClass with the following name (which can be listed via `kubectl get storageclass` against the target cluster) must exist before kube-prometheus is deployed.
              pvc.mixin.spec.withStorageClassName('rook-ceph-block'),
              // The following 'selector' is only needed if you're using manual storage provisioning (https://github.com/coreos/prometheus-operator/blob/master/Documentation/user-guides/storage.md#manual-storage-provisioning).
              // Note that AWS does not support/allow this - uncommenting the 'selector' line below when deploying kube-prometheus to a K8s cluster in AWS will leave the pvc stuck in the Pending status with the following error:
              // * 'Failed to provision volume with StorageClass "ssd": claim.Spec.Selector is not supported for dynamic provisioning on AWS'
              //pvc.mixin.spec.selector.withMatchLabels({}),
          },  // storage
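          // For reference, the mixin chain above is roughly equivalent to writing the template as a
          // plain object (a sketch only; pvc.new() may also set apiVersion/kind on the template):
          //   volumeClaimTemplate: {
          //     spec: {
          //       accessModes: ['ReadWriteOnce'],
          //       resources: { requests: { storage: '10Gi' } },
          //       storageClassName: 'rook-ceph-block',
          //     },
          //   },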
        },  // spec
      },  // prometheus
      serviceMonitorRook:
        {
          apiVersion: 'monitoring.coreos.com/v1',
          kind: 'ServiceMonitor',
          metadata: {
            name: 'rook-ceph-mgr',
            namespace: $._config.namespace,
            labels: {
              'k8s-app': 'rook',
            },
          },
          spec: {
            jobLabel: 'k8s-app',
            endpoints: [
              {
                port: 'http-metrics',
                interval: '30s',
                path: '/metrics',
              },
            ],
            selector: {
              matchLabels: {
                'app': 'rook-ceph-mgr',
                'rook_cluster': 'rook-ceph',
              },
            },
            namespaceSelector: {
              matchNames: [
                'rook-ceph',
              ],
            },
          },
        },
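      // Because serviceMonitorRook is a visible field of kp.prometheus, the
      // "{ ['prometheus-' + name]: ... }" comprehension at the bottom of this file emits it as its
      // own manifest alongside the stock prometheus objects (e.g. as 'prometheus-serviceMonitorRook';
      // the exact output filename depends on how the manifests are rendered).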
    },  // prometheus
  };
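
// Each comprehension below flattens one kube-prometheus component (an object mapping names to
// Kubernetes objects) into output keys; when this file is rendered with `jsonnet -m <outdir>`,
// each key becomes its own manifest file.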
{ ['00namespace-' + name]: kp.kubePrometheus[name] for name in std.objectFields(kp.kubePrometheus) } +
{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['prometheus-adapter-' + name]: kp.prometheusAdapter[name] for name in std.objectFields(kp.prometheusAdapter) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
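
// A minimal sketch of how a file like this is typically rendered, assuming the standard
// kube-prometheus layout (a jsonnet-bundler 'vendor/' directory next to this file); the filename
// 'example.jsonnet' below is an assumption, not part of this gist:
//   jsonnet -J vendor -m manifests example.jsonnet
// The kube-prometheus build.sh additionally converts each output file to YAML and gives it a
// .yaml extension, after which the manifests can be applied with `kubectl apply -f manifests/`.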