Skip to content

Instantly share code, notes, and snippets.

@matzew
Created June 13, 2019 10:41
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save matzew/455f38737e37d3bcfaba85631f57bec5 to your computer and use it in GitHub Desktop.
# Namespace for all Knative Serving control-plane components.
# istio-injection: enabled turns on Istio sidecar injection for pods here.
apiVersion: v1
kind: Namespace
metadata:
  labels:
    istio-injection: enabled
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-serving
---
# RBAC role granting the cert-manager reconciler full control over
# certmanager.k8s.io Certificates. Labeled serving.knative.dev/controller
# so it is aggregated into knative-serving-admin.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    networking.knative.dev/certificate-provider: cert-manager
    serving.knative.dev/controller: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-serving-certmanager
rules:
- apiGroups:
  - certmanager.k8s.io
  resources:
  - certificates
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
---
# RBAC role for the Istio ingress reconciler: manages Istio
# VirtualServices and Gateways. Aggregated into knative-serving-admin
# via the serving.knative.dev/controller label.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    networking.knative.dev/ingress-provider: istio
    serving.knative.dev/controller: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-serving-istio
rules:
- apiGroups:
  - networking.istio.io
  resources:
  - virtualservices
  - gateways
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
---
# Aggregated admin role: rules is intentionally empty ([]) — Kubernetes
# fills it in from every ClusterRole labeled
# serving.knative.dev/controller: "true" via the aggregationRule.
aggregationRule:
  clusterRoleSelectors:
  - matchLabels:
      serving.knative.dev/controller: "true"
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-serving-admin
rules: []
---
# Core RBAC role for the Knative Serving controllers: core resources,
# Deployments, webhooks, CRDs, HPAs, all Knative-owned API groups, and
# caching Images. Aggregated into knative-serving-admin.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    serving.knative.dev/controller: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-serving-core
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  - secrets
  - configmaps
  - endpoints
  - services
  - events
  - serviceaccounts
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
- apiGroups:
  - apps
  resources:
  - deployments
  - deployments/finalizers
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
- apiGroups:
  - admissionregistration.k8s.io
  resources:
  - mutatingwebhookconfigurations
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
# Full control (including deletecollection) over every resource,
# status and finalizer subresource in the Knative-owned API groups.
- apiGroups:
  - serving.knative.dev
  - autoscaling.internal.knative.dev
  - networking.internal.knative.dev
  resources:
  - '*'
  - '*/status'
  - '*/finalizers'
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - deletecollection
  - patch
  - watch
- apiGroups:
  - caching.internal.knative.dev
  resources:
  - images
  verbs:
  - get
  - list
  - create
  - update
  - delete
  - patch
  - watch
---
# ServiceAccount used by the Serving control-plane Deployments
# (activator, autoscaler, controller, webhook).
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: controller
  namespace: knative-serving
---
# Binds the controller ServiceAccount to the aggregated
# knative-serving-admin ClusterRole.
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-serving-controller-admin
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: knative-serving-admin
subjects:
- kind: ServiceAccount
  name: controller
  namespace: knative-serving
---
# Shared ingress Gateway on the standard istio-ingressgateway: plain HTTP
# on 80 and TLS-passthrough HTTPS on 443 (backends terminate TLS).
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  labels:
    networking.knative.dev/ingress-provider: istio
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: knative-ingress-gateway
  namespace: knative-serving
spec:
  selector:
    istio: ingressgateway
  servers:
  - hosts:
    - '*'
    port:
      name: http
      number: 80
      protocol: HTTP
  - hosts:
    - '*'
    port:
      name: https
      number: 443
      protocol: HTTPS
    tls:
      mode: PASSTHROUGH
---
# Gateway for cluster-local (non-public) routes, served by the
# cluster-local-gateway Istio deployment; HTTP only.
apiVersion: networking.istio.io/v1alpha3
kind: Gateway
metadata:
  labels:
    networking.knative.dev/ingress-provider: istio
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: cluster-local-gateway
  namespace: knative-serving
spec:
  selector:
    istio: cluster-local-gateway
  servers:
  - hosts:
    - '*'
    port:
      name: http
      number: 80
      protocol: HTTP
---
# CRD: internal Certificate resource (TLS cert provisioning requests).
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: certificates.networking.internal.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.conditions[?(@.type=="Ready")].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=="Ready")].reason
    name: Reason
    type: string
  group: networking.internal.knative.dev
  names:
    categories:
    - all
    - knative-internal
    - networking
    kind: Certificate
    plural: certificates
    shortNames:
    - kcert
    singular: certificate
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
---
# CRD: internal ClusterIngress resource (cluster-scoped ingress spec
# consumed by the Istio reconciler).
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: clusteringresses.networking.internal.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: networking.internal.knative.dev
  names:
    categories:
    - all
    - knative-internal
    - networking
    kind: ClusterIngress
    plural: clusteringresses
    singular: clusteringress
  scope: Cluster
  subresources:
    status: {}
  version: v1alpha1
---
# CRD: Configuration — desired state for Revisions; v1alpha1 is the
# storage version, v1beta1 is served but not stored.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: configurations.serving.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.latestCreatedRevisionName
    name: LatestCreated
    type: string
  - JSONPath: .status.latestReadyRevisionName
    name: LatestReady
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: serving.knative.dev
  names:
    categories:
    - all
    - knative
    - serving
    kind: Configuration
    plural: configurations
    shortNames:
    - config
    - cfg
    singular: configuration
  scope: Namespaced
  subresources:
    status: {}
  versions:
  - name: v1alpha1
    served: true
    storage: true
  - name: v1beta1
    served: true
    storage: false
---
# CRD: internal Image resource used for image pre-caching.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
  name: images.caching.internal.knative.dev
spec:
  group: caching.internal.knative.dev
  names:
    categories:
    - all
    - knative-internal
    - caching
    kind: Image
    plural: images
    shortNames:
    - img
    singular: image
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
---
# CRD: internal (namespaced) Ingress resource.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: ingresses.networking.internal.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: networking.internal.knative.dev
  names:
    categories:
    - all
    - knative-internal
    - networking
    kind: Ingress
    plural: ingresses
    shortNames:
    - ing
    singular: ingress
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
---
# CRD: internal PodAutoscaler (kpa) resource driving revision scaling.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: podautoscalers.autoscaling.internal.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: autoscaling.internal.knative.dev
  names:
    categories:
    - all
    - knative-internal
    - autoscaling
    kind: PodAutoscaler
    plural: podautoscalers
    shortNames:
    - kpa
    singular: podautoscaler
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
---
# CRD: Revision — immutable snapshot of code + configuration.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: revisions.serving.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.serviceName
    name: Service Name
    type: string
  - JSONPath: .metadata.labels['serving\.knative\.dev/configurationGeneration']
    name: Generation
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: serving.knative.dev
  names:
    categories:
    - all
    - knative
    - serving
    kind: Revision
    plural: revisions
    shortNames:
    - rev
    singular: revision
  scope: Namespaced
  subresources:
    status: {}
  versions:
  - name: v1alpha1
    served: true
    storage: true
  - name: v1beta1
    served: true
    storage: false
---
# CRD: Route — maps a network endpoint to one or more Revisions.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: routes.serving.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.url
    name: URL
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: serving.knative.dev
  names:
    categories:
    - all
    - knative
    - serving
    kind: Route
    plural: routes
    shortNames:
    - rt
    singular: route
  scope: Namespaced
  subresources:
    status: {}
  versions:
  - name: v1alpha1
    served: true
    storage: true
  - name: v1beta1
    served: true
    storage: false
---
# CRD: Knative Service (ksvc) — top-level resource managing a Route and
# a Configuration.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: services.serving.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.url
    name: URL
    type: string
  - JSONPath: .status.latestCreatedRevisionName
    name: LatestCreated
    type: string
  - JSONPath: .status.latestReadyRevisionName
    name: LatestReady
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: serving.knative.dev
  names:
    categories:
    - all
    - knative
    - serving
    kind: Service
    plural: services
    shortNames:
    - kservice
    - ksvc
    singular: service
  scope: Namespaced
  subresources:
    status: {}
  versions:
  - name: v1alpha1
    served: true
    storage: true
  - name: v1beta1
    served: true
    storage: false
---
# CRD: internal ServerlessService (sks) — manages the public/private
# K8s Services that front a revision.
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  labels:
    knative.dev/crd-install: "true"
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: serverlessservices.networking.internal.knative.dev
spec:
  additionalPrinterColumns:
  - JSONPath: .status.serviceName
    name: ServiceName
    type: string
  - JSONPath: .status.privateServiceName
    name: PrivateServiceName
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].status
    name: Ready
    type: string
  - JSONPath: .status.conditions[?(@.type=='Ready')].reason
    name: Reason
    type: string
  group: networking.internal.knative.dev
  names:
    categories:
    - all
    - knative-internal
    - networking
    kind: ServerlessService
    plural: serverlessservices
    shortNames:
    - sks
    singular: serverlessservice
  scope: Namespaced
  subresources:
    status: {}
  version: v1alpha1
---
# Service fronting the activator: 80 -> 8012 (HTTP/1), 81 -> 8013 (h2c),
# 9090 metrics.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: activator
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: activator-service
  namespace: knative-serving
spec:
  ports:
  - name: http
    port: 80
    protocol: TCP
    targetPort: 8012
  - name: http2
    port: 81
    protocol: TCP
    targetPort: 8013
  - name: metrics
    port: 9090
    protocol: TCP
    targetPort: 9090
  selector:
    app: activator
  type: ClusterIP
---
# Metrics-only Service for the controller deployment.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: controller
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: controller
  namespace: knative-serving
spec:
  ports:
  - name: metrics
    port: 9090
    protocol: TCP
    targetPort: 9090
  selector:
    app: controller
---
# Service for the admission webhook: 443 -> 8443.
apiVersion: v1
kind: Service
metadata:
  labels:
    role: webhook
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: webhook
  namespace: knative-serving
spec:
  ports:
  - port: 443
    targetPort: 8443
  selector:
    role: webhook
---
# Pre-caches the queue-proxy sidecar image on cluster nodes.
apiVersion: caching.internal.knative.dev/v1alpha1
kind: Image
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: queue-proxy
  namespace: knative-serving
spec:
  image: gcr.io/knative-nightly/github.com/knative/serving/cmd/queue@sha256:8b4df4238962d6f676e6d58d9dc480a9b243390ec2eaffa4b1830f9d968cf73a
---
# Activator deployment: buffers requests for scaled-to-zero revisions.
# No replicas field — replica count is managed externally (e.g. HPA).
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: activator
  namespace: knative-serving
spec:
  selector:
    matchLabels:
      app: activator
      role: activator
  template:
    metadata:
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
        sidecar.istio.io/inject: "true"
      labels:
        app: activator
        role: activator
        serving.knative.dev/release: "v20190613-5f6e052c"
    spec:
      containers:
      - args:
        - -logtostderr=false
        - -stderrthreshold=FATAL
        env:
        - name: POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: CONFIG_OBSERVABILITY_NAME
          value: config-observability
        - name: METRICS_DOMAIN
          value: knative.dev/serving
        image: gcr.io/knative-nightly/github.com/knative/serving/cmd/activator@sha256:a06ff88bb014289e257df865e56ae7d26fc203f1a9de9b3e355e2f225e4d4b51
        livenessProbe:
          httpGet:
            httpHeaders:
            - name: k-kubelet-probe
              value: activator
            path: /healthz
            port: 8012
        name: activator
        ports:
        - containerPort: 8012
          name: http1-port
        - containerPort: 8013
          name: h2c-port
        - containerPort: 9090
          name: metrics-port
        readinessProbe:
          httpGet:
            httpHeaders:
            - name: k-kubelet-probe
              value: activator
            path: /healthz
            port: 8012
        resources:
          limits:
            cpu: 200m
            memory: 600Mi
          requests:
            cpu: 20m
            memory: 60Mi
        volumeMounts:
        - mountPath: /etc/config-logging
          name: config-logging
        - mountPath: /etc/config-observability
          name: config-observability
      serviceAccountName: controller
      volumes:
      - configMap:
          name: config-logging
        name: config-logging
      - configMap:
          name: config-observability
        name: config-observability
---
# Autoscaler Service: 8080 websocket (stat sink), 9090 metrics,
# 443 -> 8443 custom-metrics API.
apiVersion: v1
kind: Service
metadata:
  labels:
    app: autoscaler
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: autoscaler
  namespace: knative-serving
spec:
  ports:
  - name: http
    port: 8080
    protocol: TCP
    targetPort: 8080
  - name: metrics
    port: 9090
    protocol: TCP
    targetPort: 9090
  - name: custom-metrics
    port: 443
    protocol: TCP
    targetPort: 8443
  selector:
    app: autoscaler
---
# Autoscaler deployment (single replica) computing desired scale from
# observed concurrency.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: autoscaler
  namespace: knative-serving
spec:
  replicas: 1
  selector:
    matchLabels:
      app: autoscaler
  template:
    metadata:
      annotations:
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
        sidecar.istio.io/inject: "true"
      labels:
        app: autoscaler
        serving.knative.dev/release: "v20190613-5f6e052c"
    spec:
      containers:
      - args:
        - --secure-port=8443
        env:
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: CONFIG_OBSERVABILITY_NAME
          value: config-observability
        - name: METRICS_DOMAIN
          value: knative.dev/serving
        image: gcr.io/knative-nightly/github.com/knative/serving/cmd/autoscaler@sha256:bbf1f3b5823cfe9854c19ed868d9425d67126643572a00e7a5204edd48409ca4
        name: autoscaler
        ports:
        - containerPort: 8080
          name: websocket
        - containerPort: 9090
          name: metrics
        - containerPort: 8443
          name: custom-metrics
        resources:
          limits:
            cpu: 300m
            memory: 400Mi
          requests:
            cpu: 30m
            memory: 40Mi
        volumeMounts:
        - mountPath: /etc/config-autoscaler
          name: config-autoscaler
        - mountPath: /etc/config-logging
          name: config-logging
        - mountPath: /etc/config-observability
          name: config-observability
      serviceAccountName: controller
      volumes:
      - configMap:
          name: config-autoscaler
        name: config-autoscaler
      - configMap:
          name: config-logging
        name: config-logging
      - configMap:
          name: config-observability
        name: config-observability
---
# Autoscaler tuning knobs. Everything lives under the commented _example
# block scalar; copy keys out of it (unindented) to take effect.
# NOTE(review): exact blank-line spacing inside _example was lost in the
# paste and is reconstructed here; key names and values are verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # The Revision ContainerConcurrency field specifies the maximum number
    # of requests the Container can handle at once. Container concurrency
    # target percentage is how much of that maximum to use in a stable
    # state. E.g. if a Revision specifies ContainerConcurrency of 10, then
    # the Autoscaler will try to maintain 7 concurrent connections per pod
    # on average. A value of 0.7 is chosen because the Autoscaler panics
    # when concurrency exceeds 2x the desired set point. So we will panic
    # before we reach the limit.
    container-concurrency-target-percentage: "1.0"

    # The container concurrency target default is what the Autoscaler will
    # try to maintain when the Revision specifies unlimited concurrency.
    # Even when specifying unlimited concurrency, the autoscaler will
    # horizontally scale the application based on this target concurrency.
    #
    # A value of 100 is chosen because it's enough to allow vertical pod
    # autoscaling to tune resource requests. E.g. maintaining 1 concurrent
    # "hello world" request doesn't consume enough resources to allow VPA
    # to achieve efficient resource usage (VPA CPU minimum is 300m).
    container-concurrency-target-default: "100"

    # When operating in a stable mode, the autoscaler operates on the
    # average concurrency over the stable window.
    stable-window: "60s"

    # When observed average concurrency during the panic window reaches
    # panic-threshold-percentage the target concurrency, the autoscaler
    # enters panic mode. When operating in panic mode, the autoscaler
    # scales on the average concurrency over the panic window which is
    # panic-window-percentage of the stable-window.
    panic-window-percentage: "10.0"

    # Absolute panic window duration.
    # Deprecated in favor of panic-window-percentage.
    # Existing revisions will continue to scale based on panic-window
    # but new revisions will default to panic-window-percentage.
    panic-window: "6s"

    # The percentage of the container concurrency target at which to
    # enter panic mode when reached within the panic window.
    panic-threshold-percentage: "200.0"

    # Max scale up rate limits the rate at which the autoscaler will
    # increase pod count. It is the maximum ratio of desired pods versus
    # observed pods.
    max-scale-up-rate: "10"

    # Scale to zero feature flag
    enable-scale-to-zero: "true"

    # Tick interval is the time between autoscaling calculations.
    tick-interval: "2s"

    # Dynamic parameters (take effect when config map is updated):

    # Scale to zero grace period is the time an inactive revision is left
    # running before it is scaled to zero (min: 30s).
    scale-to-zero-grace-period: "30s"
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-autoscaler
  namespace: knative-serving
---
# cert-manager provider settings (issuer reference and ACME solver).
# NOTE(review): _example block-scalar spacing reconstructed; values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this block and unindented to actually change the configuration.

    # IssuerRef is a reference to the issuer for this certificate.
    # IssuerRef should be either `ClusterIssuer` or `Issuer`.
    # Please refer `IssuerRef` in https://github.com/jetstack/cert-manager/blob/master/pkg/apis/certmanager/v1alpha1/types_certificate.go
    # for more details about IssuerRef configuration.
    issuerRef: |
      kind: ClusterIssuer
      name: letsencrypt-issuer

    # solverConfig defines the configuration for the ACME certificate provider.
    # The solverConfig should be either dns01 or http01.
    # Please refer `SolverConfig` in https://github.com/jetstack/cert-manager/blob/master/pkg/apis/certmanager/v1alpha1/types_certificate.go
    # for more details about ACME configuration.
    solverConfig: |
      dns01:
        provider: cloud-dns-provider
kind: ConfigMap
metadata:
  labels:
    networking.knative.dev/certificate-provider: cert-manager
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-certmanager
  namespace: knative-serving
---
# Default revision timeouts, resource requests/limits, and container
# name template.
# NOTE(review): _example block-scalar spacing reconstructed; values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # revision-timeout-seconds contains the default number of
    # seconds to use for the revision's per-request timeout, if
    # none is specified.
    revision-timeout-seconds: "300"  # 5 minutes

    # max-revision-timeout-seconds contains the maximum number of
    # seconds that can be used for revision-timeout-seconds.
    # This value must be greater than or equal to revision-timeout-seconds.
    # If omitted, the system default is used (600 seconds).
    max-revision-timeout-seconds: "600"  # 10 minutes

    # revision-cpu-request contains the cpu allocation to assign
    # to revisions by default. If omitted, no value is specified
    # and the system default is used.
    revision-cpu-request: "400m"  # 0.4 of a CPU (aka 400 milli-CPU)

    # revision-memory-request contains the memory allocation to assign
    # to revisions by default. If omitted, no value is specified
    # and the system default is used.
    revision-memory-request: "100M"  # 100 megabytes of memory

    # revision-cpu-limit contains the cpu allocation to limit
    # revisions to by default. If omitted, no value is specified
    # and the system default is used.
    revision-cpu-limit: "1000m"  # 1 CPU (aka 1000 milli-CPU)

    # revision-memory-limit contains the memory allocation to limit
    # revisions to by default. If omitted, no value is specified
    # and the system default is used.
    revision-memory-limit: "200M"  # 200 megabytes of memory

    # container-name-template contains a template for the default
    # container name, if none is specified. This field supports
    # Go templating and is supplied with the ObjectMeta of the
    # enclosing Service or Configuration, so values such as
    # {{.Name}} are also valid.
    container-name-template: "user-container"
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-defaults
  namespace: knative-serving
---
# Deployment-level settings: tag-resolution skip list and the
# queue-proxy sidecar image.
# NOTE(review): _example block-scalar spacing reconstructed; values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # List of repositories for which tag to digest resolving should be skipped
    registriesSkippingTagResolving: "ko.local,dev.local"
  queueSidecarImage: gcr.io/knative-nightly/github.com/knative/serving/cmd/queue@sha256:8b4df4238962d6f676e6d58d9dc480a9b243390ec2eaffa4b1830f9d968cf73a
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-deployment
  namespace: knative-serving
---
# Maps DNS domain suffixes to label selectors for Routes; keys inside
# _example are domains, values are optional selectors.
# NOTE(review): _example block-scalar spacing reconstructed; values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # Default value for domain.
    # Although it will match all routes, it is the least-specific rule so it
    # will only be used if no other domain matches.
    example.com: |

    # These are example settings of domain.
    # example.org will be used for routes having app=nonprofit.
    example.org: |
      selector:
        app: nonprofit

    # Routes having domain suffix of 'svc.cluster.local' will not be exposed
    # through Ingress. You can define your own label selector to assign that
    # domain suffix to your Route here, or you can set the label
    # "serving.knative.dev/visibility=cluster-local"
    # to achieve the same effect.  This shows how to make routes having
    # the label app=secret only exposed to the local cluster.
    svc.cluster.local: |
      selector:
        app: secret
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-domain
  namespace: knative-serving
---
# Garbage-collection policy for stale Revisions.
# NOTE(review): _example block-scalar spacing reconstructed; values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # Delay after revision creation before considering it for GC
    stale-revision-create-delay: "24h"

    # Duration since a route has been pointed at a revision before it should be GC'd
    # This minus lastpinned-debounce be longer than the controller resync period (10 hours)
    stale-revision-timeout: "15h"

    # Minimum number of generations of revisions to keep before considering for GC
    stale-revision-minimum-generations: "1"

    # To avoid constant updates, we allow an existing annotation to be stale by this
    # amount before we update the timestamp
    stale-revision-lastpinned-debounce: "5h"
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-gc
  namespace: knative-serving
---
# Maps Knative Gateway resources to the Istio gateway Services that
# implement them.
# NOTE(review): _example block-scalar spacing reconstructed; values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # Default Knative Gateway after v0.3. It points to the Istio
    # standard istio-ingressgateway, instead of a custom one that we
    # used pre-0.3.
    gateway.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local"

    # A cluster local gateway to allow pods outside of the mesh to access
    # Services and Routes not exposing through an ingress.  If the users
    # do have a service mesh setup, this isn't required and can be removed.
    #
    # An example use case is when users want to use Istio without any
    # sidecar injection (like Knative's istio-lean.yaml).  Since every pod
    # is outside of the service mesh in that case, a cluster-local service
    # will need to be exposed to a cluster-local gateway to be accessible.
    local-gateway.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local"

    # To use only Istio service mesh and no cluster-local-gateway, replace
    # all local-gateway.* entries the following entry.
    local-gateway.mesh: "mesh"
kind: ConfigMap
metadata:
  labels:
    networking.knative.dev/ingress-provider: istio
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-istio
  namespace: knative-serving
---
# Zap logger configuration (embedded JSON) and per-component log-level
# overrides.
# NOTE(review): _example block-scalar spacing and the JSON indentation
# were reconstructed from the flattened paste; keys and values verbatim.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################

    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.

    # Common configuration for all Knative codebase
    zap-logger-config: |
      {
        "level": "info",
        "development": false,
        "outputPaths": ["stdout"],
        "errorOutputPaths": ["stderr"],
        "encoding": "json",
        "encoderConfig": {
          "timeKey": "ts",
          "levelKey": "level",
          "nameKey": "logger",
          "callerKey": "caller",
          "messageKey": "msg",
          "stacktraceKey": "stacktrace",
          "lineEnding": "",
          "levelEncoder": "",
          "timeEncoder": "iso8601",
          "durationEncoder": "",
          "callerEncoder": ""
        }
      }

    # Log level overrides
    # For all components except the autoscaler and queue proxy,
    # changes are be picked up immediately.
    # For autoscaler and queue proxy, changes require recreation of the pods.
    loglevel.controller: "info"
    loglevel.autoscaler: "info"
    loglevel.queueproxy: "info"
    loglevel.webhook: "info"
    loglevel.activator: "info"
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-logging
  namespace: knative-serving
---
apiVersion: v1
data:
_example: |
################################
# #
# EXAMPLE CONFIGURATION #
# #
################################
# This block is not actually functional configuration,
# but serves to illustrate the available configuration
# options and document them in a way that is accessible
# to users that `kubectl edit` this config map.
#
# These sample configuration options may be copied out of
# this example block and unindented to be in the data block
# to actually change the configuration.
# istio.sidecar.includeOutboundIPRanges specifies the IP ranges that Istio sidecar
# will intercept.
#
# Replace this with the IP ranges of your cluster (see below for some examples).
# Separate multiple entries with a comma.
# Example: "10.4.0.0/14,10.7.240.0/20"
#
# If set to "*" Istio will intercept all traffic within
# the cluster as well as traffic that is going outside the cluster.
# Traffic going outside the cluster will be blocked unless
# necessary egress rules are created.
#
# If omitted or set to "", value of global.proxy.includeIPRanges
# provided at Istio deployment time is used. In default Knative serving
# deployment, global.proxy.includeIPRanges value is set to "*".
#
# If an invalid value is passed, "" is used instead.
#
# If valid set of IP address ranges are put into this value,
# Istio will no longer intercept traffic going to IP addresses
# outside the provided ranges and there is no need to specify
# egress rules.
#
# To determine the IP ranges of your cluster:
# IBM Cloud Private: cat cluster/config.yaml | grep service_cluster_ip_range
# IBM Cloud Kubernetes Service: "172.30.0.0/16,172.20.0.0/16,10.10.10.0/24"
# Google Container Engine (GKE): gcloud container clusters describe XXXXXXX --zone=XXXXXX | grep -e clusterIpv4Cidr -e servicesIpv4Cidr
# Azure Kubernetes Service (AKS): "10.0.0.0/16"
# Azure Container Service (ACS; deprecated): "10.244.0.0/16,10.240.0.0/16"
# Azure Container Service Engine (ACS-Engine; OSS): Configurable, but defaults to "10.0.0.0/16"
# Minikube: "10.0.0.1/24"
#
# For more information, visit
# https://istio.io/docs/tasks/traffic-management/egress/
#
istio.sidecar.includeOutboundIPRanges: "*"
# clusteringress.class specifies the default cluster ingress class
# to use when not dictated by Route annotation.
#
# If not specified, will use the Istio ingress.
#
# Note that changing the ClusterIngress class of an existing Route
# will result in undefined behavior. Therefore it is best to only
# update this value during the setup of Knative, to avoid getting
# undefined behavior.
clusteringress.class: "istio.ingress.networking.knative.dev"
# domainTemplate specifies the golang text template string to use
# when constructing the Knative service's DNS name. The default
# value is "{{.Name}}.{{.Namespace}}.{{.Domain}}". And those three
# values (Name, Namespace, Domain) are the only variables defined.
#
# Changing this value might be necessary when the extra levels in
# the domain name generated is problematic for wildcard certificates
# that only support a single level of domain name added to the
# certificate's domain. In those cases you might consider using a value
# of "{{.Name}}-{{.Namespace}}.{{.Domain}}", or removing the Namespace
# entirely from the template. When choosing a new value be thoughtful
# of the potential for conflicts - for example, when users choose to use
# characters such as `-` in their service, or namespace, names.
# {{.Annotations}} can be used for any customization in the go template if needed.
# We strongly recommend keeping namespace part of the template to avoid domain name clashes
# Example '{{.Name}}-{{.Namespace}}.{{ index .Annotations "sub"}}.{{.Domain}}'
# and you have an annotation {"sub":"foo"}, then the generated template would be {Name}-{Namespace}.foo.{Domain}
domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}"
# tagTemplate specifies the golang text template string to use
# when constructing the DNS name for "tags" within the traffic blocks
# of Routes and Configuration. This is used in conjunction with the
# domainTemplate above to determine the full URL for the tag.
tagTemplate: "{{.Name}}-{{.Tag}}"
# Controls whether TLS certificates are automatically provisioned and
# installed in the Knative ingress to terminate external TLS connection.
# 1. Enabled: enabling auto-TLS feature.
# 2. Disabled: disabling auto-TLS feature.
autoTLS: "Disabled"
# Controls the behavior of the HTTP endpoint for the Knative ingress.
# It requires autoTLS to be enabled.
# 1. Enabled: The Knative ingress will be able to serve HTTP connection.
# 2. Disabled: The Knative ingress will reject HTTP traffic.
# 3. Redirected: The Knative ingress will send a 302 redirect for all
# http connections, asking the clients to use HTTPS
httpProtocol: "Enabled"
kind: ConfigMap
metadata:
labels:
serving.knative.dev/release: "v20190613-5f6e052c"
name: config-network
namespace: knative-serving
---
# ConfigMap: observability (request logging and metrics) settings for
# Knative Serving. The `_example` key is documentation only — Knative
# ignores it; copy individual keys out of the block scalar directly under
# `data:` (unindented) to change the active configuration.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################
    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.
    # logging.enable-var-log-collection defaults to false.
    # The fluentd daemon set will be set up to collect /var/log if
    # this flag is true.
    logging.enable-var-log-collection: false
    # logging.revision-url-template provides a template to use for producing the
    # logging URL that is injected into the status of each Revision.
    # This value is what you might use with the Knative monitoring bundle, and provides
    # access to Kibana after setting up kubectl proxy.
    logging.revision-url-template: |
      http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase))))
    # If non-empty, this enables queue proxy writing request logs to stdout.
    # The value determines the shape of the request logs and it must be a valid go text/template.
    # It is important to keep this as a single line. Multiple lines are parsed as separate entities
    # by most collection agents and will split the request logs into multiple records.
    #
    # The following fields and functions are available to the template:
    #
    # Request: An http.Request (see https://golang.org/pkg/net/http/#Request)
    # representing an HTTP request received by the server.
    #
    # Response:
    # struct {
    #   Code    int       // HTTP status code (see https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml)
    #   Size    int       // An int representing the size of the response.
    #   Latency float64   // A float64 representing the latency of the response in seconds.
    # }
    #
    # Revision:
    # struct {
    #   Name          string // Knative revision name
    #   Namespace     string // Knative revision namespace
    #   Service       string // Knative service name
    #   Configuration string // Knative configuration name
    #   PodName       string // Name of the pod hosting the revision
    #   PodIP         string // IP of the pod hosting the revision
    # }
    #
    logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}'
    # metrics.backend-destination field specifies the system metrics destination.
    # It supports either prometheus (the default) or stackdriver.
    # Note: Using stackdriver will incur additional charges
    metrics.backend-destination: prometheus
    # metrics.request-metrics-backend-destination specifies the request metrics
    # destination. If non-empty, it enables queue proxy to send request metrics.
    # Currently supported values: prometheus, stackdriver.
    metrics.request-metrics-backend-destination: prometheus
    # metrics.stackdriver-project-id field specifies the stackdriver project ID. This
    # field is optional. When running on GCE, application default credentials will be
    # used if this field is not provided.
    metrics.stackdriver-project-id: "<your stackdriver project id>"
    # metrics.allow-stackdriver-custom-metrics indicates whether it is allowed to send metrics to
    # Stackdriver using "global" resource type and custom metric type if the
    # metrics are not supported by "knative_revision" resource type. Setting this
    # flag to "true" could cause extra Stackdriver charge.
    # If metrics.backend-destination is not Stackdriver, this is ignored.
    metrics.allow-stackdriver-custom-metrics: "false"
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-observability
  namespace: knative-serving
---
# ConfigMap: request-tracing (Zipkin) settings for Knative Serving.
# The `_example` key is documentation only — Knative ignores it; copy
# individual keys out of the block scalar directly under `data:` to
# change the active configuration.
apiVersion: v1
data:
  _example: |
    ################################
    #                              #
    #    EXAMPLE CONFIGURATION     #
    #                              #
    ################################
    # This block is not actually functional configuration,
    # but serves to illustrate the available configuration
    # options and document them in a way that is accessible
    # to users that `kubectl edit` this config map.
    #
    # These sample configuration options may be copied out of
    # this example block and unindented to be in the data block
    # to actually change the configuration.
    #
    # If true we enable adding spans within our applications.
    enable: "false"
    # URL to zipkin collector where traces are sent.
    zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans"
    # Enable zipkin debug mode. This allows all spans to be sent to the server
    # bypassing sampling.
    debug: "false"
    # Percentage (0-1) of requests to trace
    sample-rate: "0.1"
kind: ConfigMap
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: config-tracing
  namespace: knative-serving
---
# Deployment: the Knative Serving controller. Reads its logging config
# from the config-logging ConfigMap (mounted below) and exposes metrics
# on port 9090.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: controller
  namespace: knative-serving
spec:
  replicas: 1
  selector:
    matchLabels:
      app: controller
  template:
    metadata:
      annotations:
        # Control-plane pod: keep the Istio sidecar out of it.
        sidecar.istio.io/inject: "false"
      labels:
        app: controller
        serving.knative.dev/release: "v20190613-5f6e052c"
    spec:
      containers:
      - env:
        # Namespace the controller runs in, injected via the downward API.
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: CONFIG_OBSERVABILITY_NAME
          value: config-observability
        - name: METRICS_DOMAIN
          value: knative.dev/serving
        # Nightly image pinned by digest for reproducibility.
        image: gcr.io/knative-nightly/github.com/knative/serving/cmd/controller@sha256:0b4813041c097b9911f59c8a8cd70200f5d67a935a8c407b2acd47ef20fe44ca
        name: controller
        ports:
        - containerPort: 9090
          name: metrics
        resources:
          limits:
            cpu: 1000m
            memory: 1000Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - mountPath: /etc/config-logging
          name: config-logging
      serviceAccountName: controller
      volumes:
      - configMap:
          name: config-logging
        name: config-logging
---
# Deployment: cert-manager integration controller for Knative networking
# (labelled as the certificate provider). Shares the `controller`
# ServiceAccount and the config-logging ConfigMap.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    networking.knative.dev/certificate-provider: cert-manager
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: networking-certmanager
  namespace: knative-serving
spec:
  replicas: 1
  selector:
    matchLabels:
      app: networking-certmanager
  template:
    metadata:
      annotations:
        # Control-plane pod: keep the Istio sidecar out of it.
        sidecar.istio.io/inject: "false"
      labels:
        app: networking-certmanager
    spec:
      containers:
      - env:
        # Namespace the controller runs in, injected via the downward API.
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: CONFIG_OBSERVABILITY_NAME
          value: config-observability
        - name: METRICS_DOMAIN
          value: knative.dev/serving
        # Nightly image pinned by digest for reproducibility.
        image: gcr.io/knative-nightly/github.com/knative/serving/cmd/networking/certmanager@sha256:4a54fc2d416be97feedcc94f9130d28f6eca62e7c6a6447744ae2fe46ac43121
        name: networking-certmanager
        ports:
        - containerPort: 9090
          name: metrics
        resources:
          limits:
            cpu: 1000m
            memory: 1000Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - mountPath: /etc/config-logging
          name: config-logging
      serviceAccountName: controller
      volumes:
      - configMap:
          name: config-logging
        name: config-logging
---
# Deployment: Istio integration controller for Knative networking
# (labelled as the ingress provider). Shares the `controller`
# ServiceAccount and the config-logging ConfigMap.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    networking.knative.dev/ingress-provider: istio
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: networking-istio
  namespace: knative-serving
spec:
  replicas: 1
  selector:
    matchLabels:
      app: networking-istio
  template:
    metadata:
      annotations:
        # Control-plane pod: keep the Istio sidecar out of it.
        sidecar.istio.io/inject: "false"
      labels:
        app: networking-istio
    spec:
      containers:
      - env:
        # Namespace the controller runs in, injected via the downward API.
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        - name: CONFIG_OBSERVABILITY_NAME
          value: config-observability
        - name: METRICS_DOMAIN
          value: knative.dev/serving
        # Nightly image pinned by digest for reproducibility.
        image: gcr.io/knative-nightly/github.com/knative/serving/cmd/networking/istio@sha256:baf21df9543ae9128dd4f6fdcecefb4b0427f96168b6b6ebc22b9dbc9b83725b
        name: networking-istio
        ports:
        - containerPort: 9090
          name: metrics
        resources:
          limits:
            cpu: 1000m
            memory: 1000Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - mountPath: /etc/config-logging
          name: config-logging
      serviceAccountName: controller
      volumes:
      - configMap:
          name: config-logging
        name: config-logging
---
# Deployment: the Knative Serving admission webhook. Marked not safe to
# evict so the cluster autoscaler does not disrupt admission requests.
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    serving.knative.dev/release: "v20190613-5f6e052c"
  name: webhook
  namespace: knative-serving
spec:
  replicas: 1
  selector:
    matchLabels:
      app: webhook
      role: webhook
  template:
    metadata:
      annotations:
        # Protect the webhook from autoscaler-driven eviction.
        cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
        # Control-plane pod: keep the Istio sidecar out of it.
        sidecar.istio.io/inject: "false"
      labels:
        app: webhook
        role: webhook
        serving.knative.dev/release: "v20190613-5f6e052c"
    spec:
      containers:
      - env:
        # Namespace the webhook runs in, injected via the downward API.
        - name: SYSTEM_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        - name: CONFIG_LOGGING_NAME
          value: config-logging
        # Nightly image pinned by digest for reproducibility.
        image: gcr.io/knative-nightly/github.com/knative/serving/cmd/webhook@sha256:73f488fe12255236ea280ec2bdcfa238390b1ba6c81853edd931a26448b3a0db
        name: webhook
        resources:
          limits:
            cpu: 200m
            memory: 200Mi
          requests:
            cpu: 20m
            memory: 20Mi
        volumeMounts:
        - mountPath: /etc/config-logging
          name: config-logging
      serviceAccountName: controller
      volumes:
      - configMap:
          name: config-logging
        name: config-logging
---
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment