Skip to content

Instantly share code, notes, and snippets.

@x95castle1
Last active July 10, 2024 13:31
Show Gist options
  • Save x95castle1/67091cae86f2e6138ff7dfdbc7edc376 to your computer and use it in GitHub Desktop.
Custom Workload Type for SSO Workloads with Sticky Sessions
# This should be installed on your build cluster.
---
apiVersion: carto.run/v1alpha1
kind: ClusterConfigTemplate
metadata:
  name: server-with-sso-template
  annotations:
    doc: |
      This template consumes an input named config which contains a
      PodTemplateSpec and returns a ConfigMap which contains a
      "delivery.yml" which contains manifests for a Kubernetes
      Deployment.
      This template creates a deployment that allows you to control
      the replicas with an HTTPProxy with sticky sessions enabled to be
      used by SSO applications only.
spec:
  configPath: .data
  params:
    - name: replicas
      default: 1
    - name: ports
      default:
        - containerPort: 8080
          port: 80
          name: http
  healthRule:
    alwaysHealthy: {}
  # Rendered once when this template is installed; interpolated into the inner
  # template below via the (@= ... @) text-templating marker.
  #@ label_exclusions = "[\"" + "\", \"".join(["kapp.k14s.io/app", "kapp.k14s.io/association"]) + "\"]"
  #@yaml/text-templated-strings
  ytt: |
    #@ load("@ytt:data", "data")
    #@ load("@ytt:yaml", "yaml")
    #@ load("@ytt:struct", "struct")
    #@ load("@ytt:assert", "assert")

    #! Copy workload labels (minus kapp bookkeeping labels) and merge in fixed values.
    #@ def merge_labels(fixed_values):
    #@   labels = {}
    #@   if hasattr(data.values.workload.metadata, "labels"):
    #@     exclusions = (@= label_exclusions @)
    #@     for k, v in dict(data.values.workload.metadata.labels).items():
    #@       if k not in exclusions:
    #@         labels[k] = v
    #@       end
    #@     end
    #@   end
    #@   labels.update(fixed_values)
    #@   return labels
    #@ end

    #! Coerce numeric strings to ints; leave named ports (e.g. "http") untouched.
    #@ def intOrString(v):
    #@   return v if type(v) == "int" else int(v.strip()) if v.strip().isdigit() else v
    #@ end

    #! Union of the containers' declared ports and the `ports` param; on a
    #! conflict the param entry wins (it is written last into the dict).
    #@ def merge_ports(ports_spec, containers):
    #@   ports = {}
    #@   for c in containers:
    #@     for p in getattr(c, "ports", []):
    #@       ports[p.containerPort] = {"targetPort": p.containerPort, "port": p.containerPort, "name": getattr(p, "name", str(p.containerPort))}
    #@     end
    #@   end
    #@   for p in ports_spec:
    #@     targetPort = getattr(p, "containerPort", p.port)
    #@     type(targetPort) in ("string", "int") or fail("containerPort must be a string or int")
    #@     targetPort = intOrString(targetPort)
    #@
    #@     port = p.port
    #@     type(port) in ("string", "int") or fail("port must be a string or int")
    #@     port = int(port)
    #@     ports[p.port] = {"targetPort": targetPort, "port": port, "name": getattr(p, "name", str(p.port))}
    #@   end
    #@   return ports.values()
    #@ end

    #@ def delivery():
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: #@ data.values.workload.metadata.name
      annotations:
        kapp.k14s.io/update-strategy: "fallback-on-replace"
        ootb.apps.tanzu.vmware.com/servicebinding-workload: "true"
        kapp.k14s.io/change-rule: "upsert after upserting servicebinding.io/ServiceBindings"
      labels: #@ merge_labels({ "app.kubernetes.io/component": "run", "carto.run/workload-name": data.values.workload.metadata.name })
    spec:
      replicas: #@ data.values.params.replicas
      selector:
        matchLabels: #@ data.values.config.metadata.labels
      template: #@ data.values.config
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: #@ data.values.workload.metadata.name
      labels: #@ merge_labels({ "app.kubernetes.io/component": "run", "carto.run/workload-name": data.values.workload.metadata.name })
    spec:
      selector: #@ data.values.config.metadata.labels
      ports:
        #@ hasattr(data.values.params, "ports") and len(data.values.params.ports) or assert.fail("one or more ports param must be provided.")
        #@ declared_ports = []
        #@ if "ports" in data.values.params:
        #@   declared_ports = data.values.params.ports
        #@ else:
        #@   declared_ports = struct.encode([{ "containerPort": 8080, "port": 80, "name": "http"}])
        #@ end
        #@ for p in merge_ports(declared_ports, data.values.config.spec.containers):
        - #@ p
        #@ end
    ---
    #! Contour's Cookie load-balancing strategy pins each client session to one
    #! pod (sticky sessions), which SSO flows require.
    #! NOTE(review): fqdn is the bare workload name here; the run-cluster
    #! ingress-overlay appends the environment domain — confirm it is installed.
    apiVersion: projectcontour.io/v1
    kind: HTTPProxy
    metadata:
      name: #@ data.values.workload.metadata.name
      annotations:
        kapp.k14s.io/change-rule: "upsert after upserting servicebinding.io/ServiceBindings"
      labels: #@ merge_labels({ "app.kubernetes.io/component": "run", "carto.run/workload-name": data.values.workload.metadata.name })
    spec:
      routes:
        - services:
            - name: #@ data.values.workload.metadata.name
              port: 80
          loadBalancerPolicy:
            strategy: Cookie
      virtualhost:
        fqdn: #@ data.values.workload.metadata.name
        tls:
          secretName: kube-system/wildcard-cert
    #@ end

    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: #@ data.values.workload.metadata.name + "-server-with-sso"
      labels: #@ merge_labels({ "app.kubernetes.io/component": "config" })
    data:
      delivery.yml: #@ yaml.encode(delivery())
# Add the following to your tap-values.yaml on your build cluster to allow
# the new workload type in a supply chain.
ootb_supply_chain_testing_scanning:
  supported_workloads:
    - type: server-with-sso
      cluster_config_template_name: server-with-sso-template
# This should be applied to the run cluster to allow the ClusterDelivery to
# apply httpproxies as part of an installation of an application.
# The aggregation label folds this rule into the deliverable role.
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: httpproxies
  labels:
    apps.tanzu.vmware.com/aggregate-to-deliverable: "true"
rules:
  # RBAC apiGroups take the bare group name — a group/version string such as
  # "projectcontour.io/v1" is never matched, so only the group is listed.
  - apiGroups: ["projectcontour.io"]
    resources: ["httpproxies"]
    verbs:
      - get
      - list
      - watch
      - create
      - patch
      - update
      - delete
      - deletecollection
# This needs to be applied on the Run Cluster to dynamically set the FQDN on the HTTPProxies.
# The FQDN domain is read from a configmap named env-specific that should be installed on every namespace.
# The overlay secret ingress-overlay also needs to be installed in every namespace.
---
apiVersion: v1
kind: Secret
metadata:
  name: ootb-templates-overlay
  namespace: tap-install
type: Opaque
stringData:
  # ytt overlay applied to the ootb-templates package: replaces the ytt
  # template of the app-deploy ClusterDeploymentTemplate so every App it
  # stamps out pulls env-specific values and the ingress-overlay secret.
  ootb-templates-overlay.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@overlay/match by=overlay.subset({"kind": "ClusterDeploymentTemplate","metadata":{"name":"app-deploy"}}), expects="0+"
    ---
    spec:
      ytt: |
        #@ load("@ytt:data", "data")
        #@ load("@ytt:yaml", "yaml")
        #@ load("@ytt:assert", "assert")

        #! Copy deliverable labels (minus kapp bookkeeping labels) and merge in fixed values.
        #@ def merge_labels(fixed_values):
        #@   labels = {}
        #@   if hasattr(data.values.deliverable.metadata, "labels"):
        #@     exclusions = ["kapp.k14s.io/app", "kapp.k14s.io/association"]
        #@     for k, v in dict(data.values.deliverable.metadata.labels).items():
        #@       if k not in exclusions:
        #@         labels[k] = v
        #@       end
        #@     end
        #@   end
        #@   labels.update(fixed_values)
        #@   return labels
        #@ end

        #! kapp Config embedded into the App so Knative-managed fields are
        #! rebased from the live object instead of fought over on every sync.
        #@ def kapp_config():
        apiVersion: kapp.k14s.io/v1alpha1
        kind: Config
        rebaseRules:
          - path:
              - metadata
              - annotations
              - serving.knative.dev/creator
            type: copy
            sources:
              - new
              - existing
            resourceMatchers:
              - apiVersionKindMatcher:
                  apiVersion: serving.knative.dev/v1
                  kind: Service
          - path:
              - metadata
              - annotations
              - serving.knative.dev/lastModifier
            type: copy
            sources:
              - new
              - existing
            resourceMatchers:
              - apiVersionKindMatcher:
                  apiVersion: serving.knative.dev/v1
                  kind: Service
        waitRules:
          - resourceMatchers:
              - apiVersionKindMatcher:
                  apiVersion: serving.knative.dev/v1
                  kind: Service
            conditionMatchers:
              - type: Ready
                status: "True"
                success: true
              - type: Ready
                status: "False"
                failure: true
        ownershipLabelRules:
          - path:
              - spec
              - template
              - metadata
              - labels
            resourceMatchers:
              - apiVersionKindMatcher:
                  apiVersion: serving.knative.dev/v1
                  kind: Service
        #@ end

        ---
        apiVersion: kappctrl.k14s.io/v1alpha1
        kind: App
        metadata:
          name: #@ data.values.deliverable.metadata.name
          labels: #@ merge_labels({ "app.kubernetes.io/component": "deploy" })
        spec:
          syncPeriod: 10m0s
          serviceAccountName: #@ data.values.params.serviceAccount
          fetch:
            - http:
                url: #@ data.values.deployment.url or assert.fail("missing required deployment config url")
                #! Deliverable subPath takes precedence over the gitops_sub_path param.
                #@ if hasattr(data.values.deliverable.spec.source, "subPath"):
                subPath: #@ data.values.deliverable.spec.source.subPath
                #@ elif hasattr(data.values.params, "gitops_sub_path"):
                subPath: #@ data.values.params.gitops_sub_path
                #@ end
            - inline:
                paths:
                  config.yml: #@ yaml.encode(kapp_config())
          template:
            - ytt:
                ignoreUnknownComments: true
                #! env-specific supplies data.values.domain consumed by the
                #! ingress-overlay secret pulled in below.
                valuesFrom:
                  - configMapRef:
                      name: env-specific
                inline:
                  pathsFrom:
                    - secretRef:
                        name: ingress-overlay
            - kbld: {}
          deploy:
            - kapp: {}
# This should be applied via the namespace provisioner in every namespace on Run clusters.
# This secret also contains an overlay to add a domain to the ingress host name if it's not present.
---
apiVersion: v1
kind: Secret
metadata:
  name: ingress-overlay
stringData:
  ingress-overlay.yaml: |
    #@ load("@ytt:overlay", "overlay")
    #@ load("@ytt:yaml", "yaml")
    #@ load("@ytt:data", "data")

    #! Append data.values.domain to each Ingress rule host that has no domain yet.
    #@ def append_hostname_rule(left):
    #@   rules = yaml.decode(yaml.encode(left))
    #@   for rule in rules:
    #!     Safety check. If there is a . then assume domain is there
    #!     NOTE(review): this branch is a no-op (rewrites host to itself);
    #!     hosts starting with "." (find == 0) still get the domain appended.
    #@     if rule.get("host").find(".") > 0:
    #@       rule.update({"host": rule.get("host")})
    #@     else:
    #@       rule.update({"host": rule.get("host") + data.values.domain})
    #@     end
    #@   end
    #@   return rules
    #@ end

    #! Same treatment for every host listed in the Ingress tls section.
    #@ def append_hostname_tls(left):
    #@   secrets = yaml.decode(yaml.encode(left))
    #@   for secret in secrets:
    #@     fqdnhosts = list()
    #@     for host in secret.get("hosts"):
    #!       Safety check. If there is a . then assume domain is there
    #@       if host.find(".") > 0:
    #@         fqdnhosts.append(host)
    #@       else:
    #@         fqdnhosts.append(host + data.values.domain)
    #@       end
    #@     end
    #@     secret.update({"hosts": fqdnhosts})
    #@   end
    #@   return secrets
    #@ end

    #! HTTPProxy fqdn variant: left is the templated fqdn, right the overlay value (the domain).
    #@ def append_domain_to_fqdn(left, right):
    #@   if left.find(".") < 0:
    #@     return left + right
    #@   end
    #@   return left
    #@ end

    #@overlay/match by=overlay.subset({"kind": "Ingress","apiVersion": "networking.k8s.io/v1"}), expects="0+"
    ---
    spec:
      #@overlay/replace via=lambda left, _: append_hostname_rule(left)
      rules:
      #@overlay/replace via=lambda left, _: append_hostname_tls(left)
      tls:

    #@overlay/match by=overlay.subset({"kind": "HTTPProxy","apiVersion": "projectcontour.io/v1"}), expects="0+"
    ---
    spec:
      virtualhost:
        #@overlay/replace via=lambda left, right: append_domain_to_fqdn(left, right)
        fqdn: #@ data.values.domain
# This should be applied via the namespace provisioner in every namespace on
# Run Clusters. Supplies the environment domain read by ingress-overlay.
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: env-specific
data:
  config.yaml: |
    domain: ".run.cssa.tapsme.org"
# Example Workload exercising the server-with-sso workload type.
---
apiVersion: carto.run/v1alpha1
kind: Workload
metadata:
  labels:
    app.kubernetes.io/part-of: server-with-sso
    apps.tanzu.vmware.com/auto-configure-actuators: "true"
    apps.tanzu.vmware.com/has-tests: "true"
    # Routes the workload to the server-with-sso-template supply chain.
    apps.tanzu.vmware.com/workload-type: server-with-sso
  name: server-with-sso
spec:
  build:
    env:
      - name: BP_JVM_VERSION
        value: "17"
  params:
    - name: testing_pipeline_matching_labels
      value:
        apps.tanzu.vmware.com/language: java
        apps.tanzu.vmware.com/pipeline: test
  source:
    git:
      ref:
        branch: main
      url: https://github.com/x95castle1/friday-java-web-app
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment