Skip to content

Instantly share code, notes, and snippets.

@mauriciovasquezbernal
Last active September 20, 2021 11:08
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 1 You must be signed in to fork a gist
  • Save mauriciovasquezbernal/820e8e11fa546c2b4e04a3a9105b47ed to your computer and use it in GitHub Desktop.
Cilium-SPIFFE help material
#!/bin/bash
# Registers SPIRE entries for the Cilium-SPIFFE demo environment:
#   - a node entry for the spire-agent (k8s_sat node attestor),
#   - a privileged entry for cilium-agent,
#   - workload entries selected by namespace/service-account, pod image, or uid.
set -e

# entry_create FLAGS...
# Runs `spire-server entry create` inside the spire-server-0 pod with the
# given registration flags. All entries below go through this helper so the
# kubectl/exec boilerplate lives in one place.
entry_create() {
    kubectl exec -n spire spire-server-0 -- \
        /opt/spire/bin/spire-server entry create "$@"
}

######## agents ########
# spire-agent
entry_create \
    -node \
    -spiffeID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s_sat:cluster:demo-cluster \
    -selector k8s_sat:agent_ns:spire \
    -selector k8s_sat:agent_sa:spire-agent

# cilium-agent
# This entry is needed to be sure that the cilium agent is able to use the spire
# privileged API. The unix:uid:0 selector is used because cilium-agent runs as a
# process in the host in the dev environment. If cilium-agent is run as a pod
# then the k8s selectors for that pod should be used.
entry_create \
    -spiffeID spiffe://example.org/ciliumagent \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector unix:uid:0

######## workloads ########
# ns/default/sa/default
entry_create \
    -spiffeID spiffe://example.org/ns/default/sa/default \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:ns:default \
    -selector k8s:sa:default \
    -ttl 60

# ns/default/sa/foo
entry_create \
    -spiffeID spiffe://example.org/ns/default/sa/foo \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:ns:default \
    -selector k8s:sa:foo \
    -ttl 60

# ns/foo/sa/default
entry_create \
    -spiffeID spiffe://example.org/ns/foo/sa/default \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:ns:foo \
    -selector k8s:sa:default

# ns/foo/sa/foo
entry_create \
    -spiffeID spiffe://example.org/ns/foo/sa/foo \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:ns:foo \
    -selector k8s:sa:foo

# ns/foo
entry_create \
    -spiffeID spiffe://example.org/ns/foo \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:ns:foo

# workloads based on image
entry_create \
    -spiffeID spiffe://example.org/image/praqma/network-multitool \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:pod-image:praqma/network-multitool:latest

entry_create \
    -spiffeID spiffe://example.org/image/ubuntu \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector k8s:pod-image:ubuntu:latest

# workloads in the host (attested by the unix workload attestor)
entry_create \
    -spiffeID spiffe://example.org/server \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector unix:uid:1001

entry_create \
    -spiffeID spiffe://example.org/client \
    -parentID spiffe://example.org/ns/spire/sa/spire-agent \
    -selector unix:uid:1002
---
# Put all endpoints into default-deny for egress; the allow rules come from
# the image-based policy below.
apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
  name: "deny-all-egress"
spec:
  endpointSelector: {}
  egress:
    - {}
# pods running a container with praqma/network-multitool:latest are
# able to talk to pods running a container with ubuntu:latest
---
# alternative 1
#apiVersion: "cilium.io/v2"
#kind: CiliumNetworkPolicy
#metadata:
#  name: "image-based"
#spec:
#  endpointSelector:
#    matchLabels:
#      spiffe://example.org/image/praqma/network-multitool: ""
#  egress:
#    - toEndpoints:
#        - matchLabels:
#            spiffe://example.org/image/ubuntu: ""
---
# alternative 2
apiVersion: "cilium.io/v2"
kind: CiliumNetworkPolicy
metadata:
  name: "image-based"
spec:
  endpointSelector:
    matchExpressions:
      - key: spiffe://example.org/image/praqma/network-multitool
        operator: Exists
  egress:
    - toEndpoints:
        - matchExpressions:
            - key: spiffe://example.org/image/ubuntu
              operator: Exists
---
# All spire components are deployed in their own namespace
apiVersion: v1
kind: Namespace
metadata:
  name: spire
---
### spire-server related ###
# Spire-server service account
apiVersion: v1
kind: ServiceAccount
metadata:
  name: spire-server
  namespace: spire
---
# ClusterRole to allow spire-server node attestor to query Token Review API
# and to be able to push certificate bundles to a configmap
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: spire-server-trust-role
rules:
  - apiGroups: ["authentication.k8s.io"]
    resources: ["tokenreviews"]
    verbs: ["create"]
  - apiGroups: [""]
    resources: ["configmaps"]
    verbs: ["patch", "get", "list"]
---
# Binds above cluster role to spire-server service account
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: spire-server-trust-role-binding
subjects:
  - kind: ServiceAccount
    name: spire-server
    namespace: spire
roleRef:
  kind: ClusterRole
  name: spire-server-trust-role
  apiGroup: rbac.authorization.k8s.io
---
# server config
apiVersion: v1
kind: ConfigMap
metadata:
  name: spire-server
  namespace: spire
data:
  server.conf: |
    server {
      bind_address = "0.0.0.0"
      bind_port = "8081"
      registration_uds_path = "/tmp/spire-registration.sock"
      trust_domain = "example.org"
      data_dir = "/run/spire/data"
      log_level = "DEBUG"
      # AWS requires the use of RSA. EC cryptography is not supported
      ca_key_type = "rsa-2048"
      default_svid_ttl = "1h"
      ca_subject = {
        country = ["US"],
        organization = ["SPIFFE"],
        common_name = "",
      }
    }
    plugins {
      DataStore "sql" {
        plugin_data {
          database_type = "sqlite3"
          connection_string = "/run/spire/data/datastore.sqlite3"
        }
      }
      NodeAttestor "k8s_sat" {
        plugin_data {
          clusters = {
            # NOTE: Change this to your cluster name
            "demo-cluster" = {
              use_token_review_api_validation = true
              service_account_whitelist = ["spire:spire-agent"]
            }
          }
        }
      }
      NodeResolver "noop" {
        plugin_data {}
      }
      KeyManager "disk" {
        plugin_data {
          keys_path = "/run/spire/data/keys.json"
        }
      }
      Notifier "k8sbundle" {
        plugin_data {
        }
      }
    }
---
# server service
apiVersion: v1
kind: Service
metadata:
  name: spire-server
  namespace: spire
spec:
  type: NodePort
  ports:
    - name: grpc
      port: 8081
      targetPort: 8081
      protocol: TCP
  selector:
    app: spire-server
---
# server bundle map (populated by the k8sbundle notifier; intentionally empty)
apiVersion: v1
kind: ConfigMap
metadata:
  name: spire-bundle
  namespace: spire
---
# server persistent volume
apiVersion: v1
kind: PersistentVolume
metadata:
  name: my-pv-vol
  labels:
    type: local
spec:
  storageClassName: ""
  capacity:
    storage: 4Gi
  accessModes:
    - ReadWriteOnce
  hostPath:
    path: "/run"
---
# server stateful set
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: spire-server
  namespace: spire
  labels:
    app: spire-server
spec:
  replicas: 1
  selector:
    matchLabels:
      app: spire-server
  serviceName: spire-server
  template:
    metadata:
      namespace: spire
      labels:
        app: spire-server
    spec:
      serviceAccountName: spire-server
      containers:
        - name: spire-server
          image: gcr.io/spiffe-io/spire-server:0.12.0
          args:
            - -config
            - /run/spire/config/server.conf
          ports:
            - containerPort: 8081
          volumeMounts:
            - name: spire-config
              mountPath: /run/spire/config
              readOnly: true
            - name: spire-data
              mountPath: /run/spire/data
              readOnly: false
          livenessProbe:
            exec:
              command:
                - /opt/spire/bin/spire-server
                - healthcheck
            failureThreshold: 2
            initialDelaySeconds: 15
            periodSeconds: 60
            timeoutSeconds: 3
          readinessProbe:
            exec:
              command:
                - /opt/spire/bin/spire-server
                - healthcheck
                - --shallow
            initialDelaySeconds: 5
            periodSeconds: 5
      volumes:
        - name: spire-config
          configMap:
            name: spire-server
  volumeClaimTemplates:
    - metadata:
        name: spire-data
        namespace: spire
      spec:
        accessModes:
          - ReadWriteOnce
        resources:
          requests:
            storage: 1Gi
---
### spire-agent related ####
# agent serviceaccount
apiVersion: v1
kind: ServiceAccount
metadata:
name: spire-agent
namespace: spire
---
# Required cluster role to allow spire-agent to query k8s API server
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: spire-agent-cluster-role
rules:
- apiGroups: [""]
resources: ["pods","nodes","nodes/proxy"]
verbs: ["get"]
---
# Binds above cluster role to spire-agent service account
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: spire-agent-cluster-role-binding
subjects:
- kind: ServiceAccount
name: spire-agent
namespace: spire
roleRef:
kind: ClusterRole
name: spire-agent-cluster-role
apiGroup: rbac.authorization.k8s.io
---
# agent config
apiVersion: v1
kind: ConfigMap
metadata:
name: spire-agent
namespace: spire
data:
agent.conf: |
agent {
data_dir = "/run/spire"
log_level = "DEBUG"
server_address = "spire-server"
server_port = "8081"
socket_path = "/run/spire/sockets/agent.sock"
trust_bundle_path = "/run/spire/bundle/bundle.crt"
trust_domain = "example.org"
admin_socket_path = "/run/spire/sockets-admin/admin.sock"
authorized_users_privileged_api = [
"spiffe://example.org/ciliumagent",
]
}
plugins {
NodeAttestor "k8s_sat" {
plugin_data {
# NOTE: Change this to your cluster name
cluster = "demo-cluster"
}
}
KeyManager "memory" {
plugin_data {
}
}
WorkloadAttestor "k8s" {
plugin_data {
# Defaults to the secure kubelet port by default.
# Minikube does not have a cert in the cluster CA bundle that
# can authenticate the kubelet cert, so skip validation.
skip_kubelet_verification = true
}
}
WorkloadAttestor "unix" {
plugin_data {
}
}
}
---
# agent daemonset
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: spire-agent
namespace: spire
labels:
app: spire-agent
spec:
selector:
matchLabels:
app: spire-agent
template:
metadata:
namespace: spire
labels:
app: spire-agent
spec:
hostPID: true
hostNetwork: true
dnsPolicy: ClusterFirstWithHostNet
serviceAccountName: spire-agent
initContainers:
- name: init
# This is a small image with wait-for-it, choose whatever image
# you prefer that waits for a service to be up. This image is built
# from https://github.com/lqhl/wait-for-it
image: gcr.io/spiffe-io/wait-for-it
args: ["-t", "30", "spire-server:8081"]
containers:
- name: spire-agent
#image: gcr.io/spiffe-io/spire-agent:0.12.0
image: docker.io/mauriciovasquezbernal/spire-agent:v4
args: ["-config", "/run/spire/config/agent.conf"]
volumeMounts:
- name: spire-config
mountPath: /run/spire/config
readOnly: true
- name: spire-bundle
mountPath: /run/spire/bundle
- name: spire-agent-socket
mountPath: /run/spire/sockets
readOnly: false
- name: spire-admin-agent-socket
mountPath: /run/spire/sockets-admin
readOnly: false
livenessProbe:
exec:
command:
- /opt/spire/bin/spire-agent
- healthcheck
- -socketPath
- /run/spire/sockets/agent.sock
failureThreshold: 2
initialDelaySeconds: 15
periodSeconds: 60
timeoutSeconds: 3
volumes:
- name: spire-config
configMap:
name: spire-agent
- name: spire-bundle
configMap:
name: spire-bundle
- name: spire-agent-socket
hostPath:
path: /run/spire/sockets
type: DirectoryOrCreate
- name: spire-admin-agent-socket
hostPath:
path: /run/spire/sockets-admin
type: DirectoryOrCreate
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment