# Create a kafka namespace:
#
# kubectl create ns kafka
#
# Install Strimzi with:
#
# kubectl apply -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka
#
# Deploy this manifest into the kafka namespace:
#
# kubectl apply -f kafka-sasl-scram-sha-512.yaml -n kafka
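#
# Wait for the cluster to be ready before deploying clients (Strimzi sets a
# Ready condition on the Kafka resource):
#
# kubectl wait kafka/my-cluster --for=condition=Ready --timeout=300s -n kafka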
#
# To consume from the topic:
#
# Get the user's password from the Strimzi-generated secret:
# $ kubectl get secret -n kafka filebeat-kafka-user -o jsonpath='{ .data.password }' | base64 -d
#
# Config properties for the consumer:
# $ cat /tmp/consumer-properties.conf
# security.protocol=SASL_PLAINTEXT
# sasl.mechanism=SCRAM-SHA-512
#
# $ cat /tmp/jaas-kafka-client-consumer.conf
# KafkaClient {
#   org.apache.kafka.common.security.scram.ScramLoginModule required
#   username="filebeat-kafka-user"
#   password="<< REPLACE WITH SECRET >>";
# };
#
# Run the consumer:
# $ export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/jaas-kafka-client-consumer.conf"
# $ ./kafka-console-consumer.sh --topic filebeat --bootstrap-server localhost:9092 --consumer.config /tmp/consumer-properties.conf
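#
# Note: localhost:9092 assumes the consumer runs on a broker (e.g. via
# kubectl exec). From elsewhere in the cluster a throwaway client pod works
# too; a sketch, assuming the strimzi/kafka client image (the tag is
# illustrative) and the two config files above copied into the pod:
#
# $ kubectl run kafka-client -ti --rm --restart=Never -n kafka \
#     --image=strimzi/kafka:0.18.0-kafka-2.5.0 -- bash
# $ export KAFKA_OPTS="-Djava.security.auth.login.config=/tmp/jaas-kafka-client-consumer.conf"
# $ /opt/kafka/bin/kafka-console-consumer.sh --topic filebeat \
#     --bootstrap-server my-cluster-kafka-bootstrap.kafka.svc:9092 \
#     --consumer.config /tmp/consumer-properties.conf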
#
#
apiVersion: kafka.strimzi.io/v1beta1
kind: Kafka
metadata:
  name: my-cluster
spec:
  kafka:
    version: 2.5.0
    replicas: 3
    listeners:
      - name: plain
        port: 9092
        type: internal
        tls: false
        authentication:
          type: scram-sha-512
    config:
      offsets.topic.replication.factor: 1
      transaction.state.log.replication.factor: 1
      transaction.state.log.min.isr: 1
      log.message.format.version: "2.5"
    storage:
      type: ephemeral
  zookeeper:
    replicas: 1
    storage:
      type: ephemeral
  entityOperator:
    topicOperator: {}
    userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaUser
metadata:
  name: filebeat-kafka-user
  labels:
    strimzi.io/cluster: my-cluster
spec:
  authentication:
    type: scram-sha-512
  authorization:
    type: simple
    acls:
      # Grant access to the topic Filebeat publishes to.
      - resource:
          type: topic
          name: filebeat
        operation: Write
      - resource:
          type: topic
          name: filebeat
        operation: Create
      - resource:
          type: topic
          name: filebeat
        operation: Describe
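---
# Optional: the topic operator deployed via entityOperator above can pre-create
# the topic declaratively instead of relying on auto-creation; a minimal sketch
# (partition and replica counts are illustrative):
apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaTopic
metadata:
  name: filebeat
  labels:
    strimzi.io/cluster: my-cluster
spec:
  partitions: 3
  replicas: 3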
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.autodiscover:
      providers:
        - type: kubernetes
          node: ${NODE_NAME}
          hints.enabled: true
          hints.default_config:
            type: container
            paths:
              - /var/log/containers/*${data.kubernetes.container.id}.log

    output.kafka:
      enabled: true
      # Per-broker addresses of the headless service created by Strimzi.
      hosts:
        - my-cluster-kafka-0.my-cluster-kafka-brokers.kafka.svc:9092
        - my-cluster-kafka-1.my-cluster-kafka-brokers.kafka.svc:9092
        - my-cluster-kafka-2.my-cluster-kafka-brokers.kafka.svc:9092
      topic: filebeat
      username: filebeat-kafka-user
      password: ${KAFKA_PASSWORD}
      sasl.mechanism: SCRAM-SHA-512
      max_message_bytes: 1900000
      client_id: filebeat
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
      annotations:
        co.elastic.logs/enabled: "true"
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:8.0.0
          args: [
            "-c", "/etc/filebeat.yml",
            "-d", "kafka",
            "-e",
          ]
          env:
            - name: ELASTICSEARCH_HOST
              value: elasticsearch
            - name: ELASTICSEARCH_PORT
              value: "9200"
            - name: ELASTICSEARCH_USERNAME
              value: elastic
            - name: ELASTICSEARCH_PASSWORD
              value: changeme
            - name: ELASTIC_CLOUD_ID
              value:
            - name: ELASTIC_CLOUD_AUTH
              value:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
            # The user operator stores the SCRAM password in a secret named
            # after the KafkaUser.
            - name: KAFKA_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: filebeat-kafka-user
                  key: password
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            #privileged: true
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
      volumes:
        - name: config
          configMap:
            defaultMode: 0600
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        # The data folder stores a registry of read status for all files, so
        # Filebeat doesn't resend everything on a pod restart.
        - name: data
          hostPath:
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: kafka
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources:
      - namespaces
      - pods
      - nodes
    verbs:
      - get
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
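# To verify the pipeline end to end, check the Filebeat logs for Kafka output
# activity (the "-d kafka" flag above enables the kafka debug selector):
#
# kubectl logs -n kafka -l k8s-app=filebeat --tail=20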
---
# Create a kafka namespace:
#
# kubectl create ns kafka
#
# Install Strimzi with:
#
# kubectl apply -f 'https://strimzi.io/install/latest?namespace=kafka' -n kafka
#
# Deploy this manifest into the kafka namespace:
#
# kubectl apply -f kafka-ssl-problem.yaml -n kafka
#
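# The user operator generates a TLS client certificate for the KafkaUser. To
# use the credentials from outside the cluster, the CA and user material can
# be extracted from the Strimzi-generated secrets (key names match the paths
# mounted in the DaemonSet below):
#
# kubectl get secret -n kafka my-cluster-cluster-ca-cert -o jsonpath='{.data.ca\.crt}' | base64 -d > ca.crt
# kubectl get secret -n kafka filebeat-kafka-user -o jsonpath='{.data.user\.crt}' | base64 -d > user.crt
# kubectl get secret -n kafka filebeat-kafka-user -o jsonpath='{.data.user\.key}' | base64 -d > user.key
#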
apiVersion: kafka.strimzi.io/v1beta1
kind: Kafka
metadata:
  name: my-cluster
spec:
  kafka:
    version: 2.5.0
    replicas: 3
    listeners:
      plain: {}
      tls:
        authentication:
          type: tls
    config:
      offsets.topic.replication.factor: 1
      transaction.state.log.replication.factor: 1
      transaction.state.log.min.isr: 1
      log.message.format.version: "2.5"
    storage:
      type: ephemeral
  zookeeper:
    replicas: 1
    storage:
      type: ephemeral
  entityOperator:
    topicOperator: {}
    userOperator: {}
---
apiVersion: kafka.strimzi.io/v1beta1
kind: KafkaUser
metadata:
  name: filebeat-kafka-user
  labels:
    strimzi.io/cluster: my-cluster
spec:
  authentication:
    type: tls
  authorization:
    type: simple
    acls:
      # Grant access to the topic Filebeat publishes to.
      - resource:
          type: topic
          name: filebeat
        operation: Write
      - resource:
          type: topic
          name: filebeat
        operation: Create
      - resource:
          type: topic
          name: filebeat
        operation: Describe
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: filebeat-config
  labels:
    k8s-app: filebeat
data:
  filebeat.yml: |-
    filebeat.autodiscover:
      providers:
        - type: kubernetes
          node: ${NODE_NAME}
          hints.enabled: true
          hints.default_config:
            type: container
            paths:
              - /var/log/containers/*${data.kubernetes.container.id}.log

    output.kafka:
      enabled: true
      # Port 9093 is the TLS listener.
      hosts:
        - my-cluster-kafka-0.my-cluster-kafka-brokers.kafka.svc:9093
        - my-cluster-kafka-1.my-cluster-kafka-brokers.kafka.svc:9093
        - my-cluster-kafka-2.my-cluster-kafka-brokers.kafka.svc:9093
      topic: filebeat
      max_message_bytes: 1900000
      client_id: filebeat
      # Cluster CA and client credentials mounted from the Strimzi-generated
      # secrets (see the DaemonSet volumes below).
      ssl.certificate_authorities: ["/etc/ssl/kafka-ca/ca.crt"]
      ssl.certificate: "/etc/ssl/kafka/user.crt"
      ssl.key: "/etc/ssl/kafka/user.key"
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
spec:
  selector:
    matchLabels:
      k8s-app: filebeat
  template:
    metadata:
      labels:
        k8s-app: filebeat
      annotations:
        co.elastic.logs/enabled: "true"
    spec:
      serviceAccountName: filebeat
      terminationGracePeriodSeconds: 30
      hostNetwork: true
      dnsPolicy: ClusterFirstWithHostNet
      containers:
        - name: filebeat
          image: docker.elastic.co/beats/filebeat:7.8.0-SNAPSHOT
          args: [
            "-c", "/etc/filebeat.yml",
            "-d", "kafka",
            "-e",
          ]
          env:
            - name: ELASTICSEARCH_HOST
              value: elasticsearch
            - name: ELASTICSEARCH_PORT
              value: "9200"
            - name: ELASTICSEARCH_USERNAME
              value: elastic
            - name: ELASTICSEARCH_PASSWORD
              value: changeme
            - name: ELASTIC_CLOUD_ID
              value:
            - name: ELASTIC_CLOUD_AUTH
              value:
            - name: NODE_NAME
              valueFrom:
                fieldRef:
                  fieldPath: spec.nodeName
          securityContext:
            runAsUser: 0
            # If using Red Hat OpenShift uncomment this:
            #privileged: true
          resources:
            limits:
              memory: 200Mi
            requests:
              cpu: 100m
              memory: 100Mi
          volumeMounts:
            - name: config
              mountPath: /etc/filebeat.yml
              readOnly: true
              subPath: filebeat.yml
            - name: data
              mountPath: /usr/share/filebeat/data
            - name: varlibdockercontainers
              mountPath: /var/lib/docker/containers
              readOnly: true
            - name: varlog
              mountPath: /var/log
              readOnly: true
            - name: ssl-kafka
              mountPath: /etc/ssl/kafka
            - name: ssl-kafka-ca
              mountPath: /etc/ssl/kafka-ca
      volumes:
        - name: config
          configMap:
            defaultMode: 0600
            name: filebeat-config
        - name: varlibdockercontainers
          hostPath:
            path: /var/lib/docker/containers
        - name: varlog
          hostPath:
            path: /var/log
        # Cluster CA certificate generated by Strimzi.
        - name: ssl-kafka-ca
          secret:
            secretName: my-cluster-cluster-ca-cert
        # Client certificate and key generated by the user operator.
        - name: ssl-kafka
          secret:
            secretName: filebeat-kafka-user
        # The data folder stores a registry of read status for all files, so
        # Filebeat doesn't resend everything on a pod restart.
        - name: data
          hostPath:
            path: /var/lib/filebeat-data
            type: DirectoryOrCreate
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: filebeat
subjects:
  - kind: ServiceAccount
    name: filebeat
    namespace: kafka
roleRef:
  kind: ClusterRole
  name: filebeat
  apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
rules:
  - apiGroups: [""] # "" indicates the core API group
    resources:
      - namespaces
      - pods
    verbs:
      - get
      - watch
      - list
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: filebeat
  labels:
    k8s-app: filebeat
---