@dchen1107
Created November 7, 2017 23:19
================= /etc/kubernetes/addons/cluster-loadbalancing/glbc/default-svc.yaml ==================
apiVersion: v1
kind: Service
metadata:
# This must match the --default-backend-service argument of the l7 lb
# controller and is required because GCE mandates a default backend.
name: default-http-backend
namespace: kube-system
labels:
k8s-app: glbc
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "GLBCDefaultBackend"
spec:
# The default backend must be of type NodePort.
type: NodePort
ports:
- port: 80
targetPort: 8080
protocol: TCP
name: http
selector:
k8s-app: glbc
================= /etc/kubernetes/addons/cluster-loadbalancing/glbc/default-svc-controller.yaml ==================
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: l7-default-backend
namespace: kube-system
labels:
k8s-app: glbc
kubernetes.io/name: "GLBC"
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
selector:
matchLabels:
k8s-app: glbc
template:
metadata:
labels:
k8s-app: glbc
name: glbc
spec:
containers:
- name: default-http-backend
# Any image is permissible as long as:
# 1. It serves a 404 page at /
# 2. It serves 200 on a /healthz endpoint
image: eu.gcr.io/google_containers/defaultbackend:1.3
livenessProbe:
httpGet:
path: /healthz
port: 8080
scheme: HTTP
initialDelaySeconds: 30
timeoutSeconds: 5
ports:
- containerPort: 8080
resources:
limits:
cpu: 10m
memory: 20Mi
requests:
cpu: 10m
memory: 20Mi
================= /etc/kubernetes/addons/cluster-monitoring/heapster-rbac.yaml ==================
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: heapster-binding
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:heapster
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system
---
# Heapster's pod_nanny monitors the heapster deployment & its pod(s), and scales
# the resources of the deployment if necessary.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: Role
metadata:
name: system:pod-nanny
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- "extensions"
resources:
- deployments
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: RoleBinding
metadata:
name: heapster-binding
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: system:pod-nanny
subjects:
- kind: ServiceAccount
name: heapster
namespace: kube-system
---
================= /etc/kubernetes/addons/cluster-monitoring/stackdriver/heapster-controller.yaml ==================
apiVersion: v1
kind: ServiceAccount
metadata:
name: heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: heapster-v1.4.3
namespace: kube-system
labels:
k8s-app: heapster
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v1.4.3
spec:
replicas: 1
selector:
matchLabels:
k8s-app: heapster
version: v1.4.3
template:
metadata:
labels:
k8s-app: heapster
version: v1.4.3
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- image: eu.gcr.io/google_containers/heapster-amd64:v1.4.3
name: heapster
livenessProbe:
httpGet:
path: /healthz
port: 8082
scheme: HTTP
initialDelaySeconds: 180
timeoutSeconds: 5
command:
- /heapster
- --source=kubernetes.summary_api:''
- --sink=stackdriver:?cluster_name=gke-dawnchen-176&min_interval_sec=100&batch_export_timeout_sec=110
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
readOnly: true
- name: usr-ca-certs
mountPath: /usr/share/ca-certificates
readOnly: true
- name: prom-to-sd
image: gcr.io/google-containers/prometheus-to-sd:v0.2.1
command:
- /monitor
- --source=heapster:http://localhost:8082?whitelisted=stackdriver_requests_count,stackdriver_timeseries_count
- --stackdriver-prefix=container.googleapis.com/internal/addons
- --pod-id=$(POD_NAME)
- --namespace-id=$(POD_NAMESPACE)
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- image: eu.gcr.io/google_containers/addon-resizer:1.7
name: heapster-nanny
resources:
limits:
cpu: 50m
memory: 92960Ki
requests:
cpu: 50m
memory: 92960Ki
env:
- name: MY_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: MY_POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command:
- /pod_nanny
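# Note: the nanny resizes the heapster container to roughly a base amount plus a
# per-node increment (e.g. cpu = 80m + 0.5m per node) and only updates the
# deployment when the computed value deviates by more than the threshold percentage.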
- --cpu=80m
- --extra-cpu=0.5m
- --memory=140Mi
- --extra-memory=4Mi
- --threshold=5
- --deployment=heapster-v1.4.3
- --container=heapster
- --poll-period=300000
- --estimator=exponential
volumes:
- name: ssl-certs
hostPath:
path: "/etc/ssl/certs"
- name: usr-ca-certs
hostPath:
path: "/usr/share/ca-certificates"
serviceAccountName: heapster
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
================= /etc/kubernetes/addons/cluster-monitoring/stackdriver/heapster-service.yaml ==================
kind: Service
apiVersion: v1
metadata:
name: heapster
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "Heapster"
spec:
ports:
- port: 80
targetPort: 8082
selector:
k8s-app: heapster
================= /etc/kubernetes/addons/dashboard/dashboard-controller.yaml ==================
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: kubernetes-dashboard
image: eu.gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.1
resources:
# keep request = limit to keep this container in guaranteed class
limits:
cpu: 100m
memory: 300Mi
requests:
cpu: 100m
memory: 100Mi
ports:
- containerPort: 9090
livenessProbe:
httpGet:
path: /
port: 9090
initialDelaySeconds: 30
timeoutSeconds: 30
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
================= /etc/kubernetes/addons/dashboard/dashboard-service.yaml ==================
apiVersion: v1
kind: Service
metadata:
name: kubernetes-dashboard
namespace: kube-system
labels:
k8s-app: kubernetes-dashboard
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
k8s-app: kubernetes-dashboard
ports:
- port: 80
targetPort: 9090
================= /etc/kubernetes/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml ==================
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
k8s-app: kube-dns-autoscaler
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
template:
metadata:
labels:
k8s-app: kube-dns-autoscaler
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
containers:
- name: autoscaler
image: eu.gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.1.2-r2
resources:
requests:
cpu: "20m"
memory: "10Mi"
command:
- /cluster-proportional-autoscaler
- --namespace=kube-system
- --configmap=kube-dns-autoscaler
# Should keep target in sync with cluster/addons/dns/kubedns-controller.yaml.base
- --target=Deployment/kube-dns
# When the cluster is using large nodes (with more cores), "coresPerReplica" should dominate.
# If using small nodes, "nodesPerReplica" should dominate.
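# In "linear" mode the autoscaler roughly computes
# replicas = max(ceil(cores / coresPerReplica), ceil(nodes / nodesPerReplica)),
# and preventSinglePointFailure keeps at least 2 replicas once the cluster has more than one node.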
- --default-params={"linear":{"coresPerReplica":256,"nodesPerReplica":16,"preventSinglePointFailure":true}}
- --logtostderr=true
- --v=2
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
serviceAccountName: kube-dns-autoscaler
================= /etc/kubernetes/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler-rbac.yaml ==================
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
kind: ServiceAccount
apiVersion: v1
metadata:
name: kube-dns-autoscaler
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: system:kube-dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["nodes"]
verbs: ["list"]
- apiGroups: [""]
resources: ["replicationcontrollers/scale"]
verbs: ["get", "update"]
- apiGroups: ["extensions"]
resources: ["deployments/scale", "replicasets/scale"]
verbs: ["get", "update"]
# Remove the configmaps rule once the issue below is fixed:
# kubernetes-incubator/cluster-proportional-autoscaler#16
- apiGroups: [""]
resources: ["configmaps"]
verbs: ["get", "create"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: system:kube-dns-autoscaler
labels:
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: kube-dns-autoscaler
namespace: kube-system
roleRef:
kind: ClusterRole
name: system:kube-dns-autoscaler
apiGroup: rbac.authorization.k8s.io
================= /etc/kubernetes/addons/dns/kubedns-cm.yaml ==================
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: ConfigMap
metadata:
name: kube-dns
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: EnsureExists
================= /etc/kubernetes/addons/dns/kubedns-controller.yaml ==================
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Keep the target in cluster/addons/dns-horizontal-autoscaler/dns-horizontal-autoscaler.yaml
# in sync with this file.
# Warning: This is a file generated from the base underscore template file: kubedns-controller.yaml.base
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
# replicas: not specified here:
# 1. So that the Addon Manager does not reconcile this replicas parameter.
# 2. Default is 1.
# 3. Will be tuned in real time if DNS horizontal auto-scaling is turned on.
strategy:
rollingUpdate:
maxSurge: 10%
maxUnavailable: 0
selector:
matchLabels:
k8s-app: kube-dns
template:
metadata:
labels:
k8s-app: kube-dns
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
tolerations:
- key: "CriticalAddonsOnly"
operator: "Exists"
volumes:
- name: kube-dns-config
configMap:
name: kube-dns
optional: true
containers:
- name: kubedns
image: eu.gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.5
resources:
# TODO: Set memory limits when we've profiled the container for large
# clusters, then set request = limit to keep this container in
# guaranteed class. Currently, this container falls into the
# "burstable" category so the kubelet doesn't backoff from restarting it.
limits:
memory: 170Mi
requests:
cpu: 100m
memory: 70Mi
livenessProbe:
httpGet:
path: /healthcheck/kubedns
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /readiness
port: 8081
scheme: HTTP
# We poll on pod startup for the Kubernetes master service and
# only set up the /readiness HTTP server once that's available.
initialDelaySeconds: 3
timeoutSeconds: 5
args:
- --domain=cluster.local.
- --dns-port=10053
- --config-dir=/kube-dns-config
- --v=2
env:
- name: PROMETHEUS_PORT
value: "10055"
ports:
- containerPort: 10053
name: dns-local
protocol: UDP
- containerPort: 10053
name: dns-tcp-local
protocol: TCP
- containerPort: 10055
name: metrics
protocol: TCP
volumeMounts:
- name: kube-dns-config
mountPath: /kube-dns-config
- name: dnsmasq
image: eu.gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.5
livenessProbe:
httpGet:
path: /healthcheck/dnsmasq
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- -v=2
- -logtostderr
- -configDir=/etc/k8s/dns/dnsmasq-nanny
- -restartDnsmasq=true
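# Everything after the following "--" separator is passed to dnsmasq itself; the
# server entries forward cluster.local and reverse-lookup queries to the kubedns
# container on 127.0.0.1:10053.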
- --
- -k
- --cache-size=1000
- --log-facility=-
- --server=/cluster.local/127.0.0.1#10053
- --server=/in-addr.arpa/127.0.0.1#10053
- --server=/ip6.arpa/127.0.0.1#10053
ports:
- containerPort: 53
name: dns
protocol: UDP
- containerPort: 53
name: dns-tcp
protocol: TCP
# see: https://github.com/kubernetes/kubernetes/issues/29055 for details
resources:
requests:
cpu: 150m
memory: 20Mi
volumeMounts:
- name: kube-dns-config
mountPath: /etc/k8s/dns/dnsmasq-nanny
- name: sidecar
image: eu.gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.5
livenessProbe:
httpGet:
path: /metrics
port: 10054
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 5
successThreshold: 1
failureThreshold: 5
args:
- --v=2
- --logtostderr
- --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,A
- --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,A
ports:
- containerPort: 10054
name: metrics
protocol: TCP
resources:
requests:
memory: 20Mi
cpu: 10m
dnsPolicy: Default # Don't use cluster DNS.
serviceAccountName: kube-dns
================= /etc/kubernetes/addons/dns/kubedns-sa.yaml ==================
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-dns
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
================= /etc/kubernetes/addons/dns/kubedns-svc.yaml ==================
# Copyright 2016 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Warning: This is a file generated from the base underscore template file: kubedns-svc.yaml.base
apiVersion: v1
kind: Service
metadata:
name: kube-dns
namespace: kube-system
labels:
k8s-app: kube-dns
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
kubernetes.io/name: "KubeDNS"
spec:
selector:
k8s-app: kube-dns
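# This clusterIP must match the --cluster-dns flag passed to the kubelets;
# it is the DNS server address written into each pod's /etc/resolv.conf.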
clusterIP: 10.59.240.10
ports:
- name: dns
port: 53
protocol: UDP
- name: dns-tcp
port: 53
protocol: TCP
================= /etc/kubernetes/addons/fluentd-gcp/event-exporter.yaml ==================
apiVersion: v1
kind: ServiceAccount
metadata:
name: event-exporter-sa
namespace: kube-system
labels:
k8s-app: event-exporter
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: event-exporter-rb
namespace: kube-system
labels:
k8s-app: event-exporter
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: view
subjects:
- kind: ServiceAccount
name: event-exporter-sa
namespace: kube-system
---
apiVersion: apps/v1beta1
kind: Deployment
metadata:
name: event-exporter-v0.1.7
namespace: kube-system
labels:
k8s-app: event-exporter
version: v0.1.7
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
spec:
replicas: 1
template:
metadata:
labels:
k8s-app: event-exporter
version: v0.1.7
spec:
serviceAccountName: event-exporter-sa
containers:
- name: event-exporter
image: gcr.io/google-containers/event-exporter:v0.1.7
command:
- '/event-exporter'
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.2.1
command:
- /monitor
- --component=event_exporter
- --stackdriver-prefix=container.googleapis.com/internal/addons
- --whitelisted-metrics=stackdriver_sink_received_entry_count,stackdriver_sink_request_count,stackdriver_sink_successfully_sent_entry_count
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
terminationGracePeriodSeconds: 30
volumes:
- name: ssl-certs
hostPath:
path: /etc/ssl/certs
================= /etc/kubernetes/addons/fluentd-gcp/fluentd-gcp-configmap.yaml ==================
kind: ConfigMap
apiVersion: v1
data:
containers.input.conf: |-
# This configuration file for Fluentd is used
# to watch changes to Docker log files that live in the
# directory /var/lib/docker/containers/ and are symbolically
# linked to from the /var/log/containers directory using names that capture the
# pod name and container name. These logs are then submitted to
# Google Cloud Logging which assumes the installation of the cloud-logging plug-in.
#
# Example
# =======
# A line in the Docker log file might look like this JSON:
#
# {"log":"2014/09/25 21:15:03 Got request with path wombat\\n",
# "stream":"stderr",
# "time":"2014-09-25T21:15:03.499185026Z"}
#
# The record reformer is used to write the tag to focus on the pod name
# and the Kubernetes container name. For example a Docker container's logs
# might be in the directory:
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b
# and in the file:
# 997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
# where 997599971ee6... is the Docker ID of the running container.
# The Kubernetes kubelet makes a symbolic link to this file on the host machine
# in the /var/log/containers directory which includes the pod name and the Kubernetes
# container name:
# synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# ->
# /var/lib/docker/containers/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b/997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b-json.log
# The /var/log directory on the host is mapped to the /var/log directory in the container
# running this instance of Fluentd and we end up collecting the file:
# /var/log/containers/synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# This results in the tag:
# var.log.containers.synthetic-logger-0.25lps-pod_default-synth-lgr-997599971ee6366d4a5920d25b79286ad45ff37a74494f262e3bc98d909d0a7b.log
# The record reformer is used to discard the var.log.containers prefix and
# the Docker container ID suffix, and "kubernetes." is prepended, giving the tag:
# kubernetes.synthetic-logger-0.25lps-pod_default-synth-lgr
# The tag is then parsed by the google_cloud plugin and translated to the metadata,
# visible in the log viewer.
# Example:
# {"log":"[info:2016-02-16T16:04:05.930-08:00] Some log text here\n","stream":"stdout","time":"2016-02-17T00:04:05.931087621Z"}
<source>
type tail
format json
time_key time
path /var/log/containers/*.log
pos_file /var/log/gcp-containers.log.pos
time_format %Y-%m-%dT%H:%M:%S.%N%Z
tag reform.*
read_from_head true
</source>
<filter reform.**>
type parser
format /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<log>.*)/
reserve_data true
suppress_parse_error_log true
key_name log
</filter>
<match reform.**>
type record_reformer
enable_ruby true
tag raw.kubernetes.${tag_suffix[4].split('-')[0..-2].join('-')}
</match>
# Detect exceptions in the log output and forward them as one log entry.
<match raw.kubernetes.**>
@type detect_exceptions
remove_tag_prefix raw
message log
stream stream
multiline_flush_interval 5
max_bytes 500000
max_lines 1000
</match>
system.input.conf: |-
# Example:
# 2015-12-21 23:17:22,066 [salt.state ][INFO ] Completed state [net.ipv4.ip_forward] at time 23:17:22.066081
<source>
type tail
format /^(?<time>[^ ]* [^ ,]*)[^\[]*\[[^\]]*\]\[(?<severity>[^ \]]*) *\] (?<message>.*)$/
time_format %Y-%m-%d %H:%M:%S
path /var/log/salt/minion
pos_file /var/log/gcp-salt.pos
tag salt
</source>
# Example:
# Dec 21 23:17:22 gke-foo-1-1-4b5cbd14-node-4eoj startupscript: Finished running startup script /var/run/google.startup.script
<source>
type tail
format syslog
path /var/log/startupscript.log
pos_file /var/log/gcp-startupscript.log.pos
tag startupscript
</source>
# Examples:
# time="2016-02-04T06:51:03.053580605Z" level=info msg="GET /containers/json"
# time="2016-02-04T07:53:57.505612354Z" level=error msg="HTTP Error" err="No such image: -f" statusCode=404
<source>
type tail
format /^time="(?<time>[^)]*)" level=(?<severity>[^ ]*) msg="(?<message>[^"]*)"( err="(?<error>[^"]*)")?( statusCode=($<status_code>\d+))?/
path /var/log/docker.log
pos_file /var/log/gcp-docker.log.pos
tag docker
</source>
# Example:
# 2016/02/04 06:52:38 filePurge: successfully removed file /var/etcd/data/member/wal/00000000000006d0-00000000010a23d1.wal
<source>
type tail
# Not parsing this, because it doesn't have anything particularly useful to
# parse out of it (like severities).
format none
path /var/log/etcd.log
pos_file /var/log/gcp-etcd.log.pos
tag etcd
</source>
# Multi-line parsing is required for all the kube logs because very large log
# statements, such as those that include entire object bodies, get split into
# multiple lines by glog.
# Example:
# I0204 07:32:30.020537 3368 server.go:1048] POST /stats/container/: (13.972191ms) 200 [[Go-http-client/1.1] 10.244.1.3:40537]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kubelet.log
pos_file /var/log/gcp-kubelet.log.pos
tag kubelet
</source>
# Example:
# I1118 21:26:53.975789 6 proxier.go:1096] Port "nodePort for kube-system/default-http-backend:http" (:31429/tcp) was open before and is still needed
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-proxy.log
pos_file /var/log/gcp-kube-proxy.log.pos
tag kube-proxy
</source>
# Example:
# I0204 07:00:19.604280 5 handlers.go:131] GET /api/v1/nodes: (1.624207ms) 200 [[kube-controller-manager/v1.1.3 (linux/amd64) kubernetes/6a81b50] 127.0.0.1:38266]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-apiserver.log
pos_file /var/log/gcp-kube-apiserver.log.pos
tag kube-apiserver
</source>
# Example:
# 2017-02-09T00:15:57.992775796Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" ip="104.132.1.72" method="GET" user="kubecfg" as="<self>" asgroups="<lookup>" namespace="default" uri="/api/v1/namespaces/default/pods"
# 2017-02-09T00:15:57.993528822Z AUDIT: id="90c73c7c-97d6-4b65-9461-f94606ff825f" response="200"
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\S+\s+AUDIT:/
# Fields must be explicitly captured by name to be parsed into the record.
# Fields may not always be present, and order may change, so this just looks
# for a list of key="\"quoted\" value" pairs separated by spaces.
# Unknown fields are ignored.
# Note: We can't separate query/response lines as format1/format2 because
# they don't always come one after the other for a given query.
# TODO: Maybe add a JSON output mode to audit log so we can get rid of this?
format1 /^(?<time>\S+) AUDIT:(?: (?:id="(?<id>(?:[^"\\]|\\.)*)"|ip="(?<ip>(?:[^"\\]|\\.)*)"|method="(?<method>(?:[^"\\]|\\.)*)"|user="(?<user>(?:[^"\\]|\\.)*)"|groups="(?<groups>(?:[^"\\]|\\.)*)"|as="(?<as>(?:[^"\\]|\\.)*)"|asgroups="(?<asgroups>(?:[^"\\]|\\.)*)"|namespace="(?<namespace>(?:[^"\\]|\\.)*)"|uri="(?<uri>(?:[^"\\]|\\.)*)"|response="(?<response>(?:[^"\\]|\\.)*)"|\w+="(?:[^"\\]|\\.)*"))*/
time_format %FT%T.%L%Z
path /var/log/kube-apiserver-audit.log
pos_file /var/log/gcp-kube-apiserver-audit.log.pos
tag kube-apiserver-audit
</source>
# Example:
# I0204 06:55:31.872680 5 servicecontroller.go:277] LB already exists and doesn't need update for service kube-system/kube-ui
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-controller-manager.log
pos_file /var/log/gcp-kube-controller-manager.log.pos
tag kube-controller-manager
</source>
# Example:
# W0204 06:49:18.239674 7 reflector.go:245] pkg/scheduler/factory/factory.go:193: watch of *api.Service ended with: 401: The event in requested index is outdated and cleared (the requested history has been cleared [2578313/2577886]) [2579312]
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/kube-scheduler.log
pos_file /var/log/gcp-kube-scheduler.log.pos
tag kube-scheduler
</source>
# Example:
# I1104 10:36:20.242766 5 rescheduler.go:73] Running Rescheduler
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/rescheduler.log
pos_file /var/log/gcp-rescheduler.log.pos
tag rescheduler
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/glbc.log
pos_file /var/log/gcp-glbc.log.pos
tag glbc
</source>
# Example:
# I0603 15:31:05.793605 6 cluster_manager.go:230] Reading config from path /etc/gce.conf
<source>
type tail
format multiline
multiline_flush_interval 5s
format_firstline /^\w\d{4}/
format1 /^(?<severity>\w)(?<time>\d{4} [^\s]*)\s+(?<pid>\d+)\s+(?<source>[^ \]]+)\] (?<message>.*)/
time_format %m%d %H:%M:%S.%N
path /var/log/cluster-autoscaler.log
pos_file /var/log/gcp-cluster-autoscaler.log.pos
tag cluster-autoscaler
</source>
# Logs from systemd-journal for interesting services.
<source>
type systemd
filters [{ "_SYSTEMD_UNIT": "docker.service" }]
pos_file /var/log/gcp-journald-docker.pos
read_from_head true
tag docker
</source>
<source>
type systemd
filters [{ "_SYSTEMD_UNIT": "kubelet.service" }]
pos_file /var/log/gcp-journald-kubelet.pos
read_from_head true
tag kubelet
</source>
monitoring.conf: |-
# Prometheus monitoring
<source>
@type prometheus
port 31337
</source>
<source>
@type prometheus_monitor
</source>
# This source is used to acquire the approximate process start timestamp,
# whose purpose is explained before the corresponding output plugin.
<source>
@type exec
command /bin/sh -c 'date +%s'
tag process_start
time_format %Y-%m-%d %H:%M:%S
keys process_start_timestamp
</source>
# This filter is used to convert the process start timestamp to an integer
# value for correct ingestion in the prometheus output plugin.
<filter process_start>
@type record_transformer
enable_ruby true
auto_typecast true
<record>
process_start_timestamp ${record["process_start_timestamp"].to_i}
</record>
</filter>
output.conf: |-
# This match is placed before the all-matching output to provide the metric
# exporter with a process start timestamp for correct exporting of
# cumulative metrics to Stackdriver.
<match process_start>
@type prometheus
<metric>
type gauge
name process_start_time_seconds
desc Timestamp of the process start in seconds
key process_start_timestamp
</metric>
</match>
# TODO(instrumentation): Reconsider this workaround later.
# Trim entries which exceed slightly less than 100KB, to avoid
# dropping them. This is necessary because Stackdriver only supports
# entries that are up to 100KB in size.
<filter kubernetes.**>
@type record_transformer
enable_ruby true
<record>
log ${record['log'].length > 100000 ? "[Trimmed]#{record['log'][0..100000]}..." : record['log']}
</record>
</filter>
# This filter counts the number of log entries read by fluentd
# before they are processed by the output plugin. This makes it possible to
# monitor the number of log entries that were read but never sent, e.g.
# because the liveness probe removed the buffer.
<filter **>
@type prometheus
<metric>
type counter
name logging_entry_count
desc Total number of log entries generated by either application containers or system components
</metric>
</filter>
# We use two output stanzas: one to handle the container logs and one to handle
# the node daemon logs; the latter explicitly sends its logs to the
# compute.googleapis.com service rather than container.googleapis.com to keep
# them separate, since most users don't care about the node logs.
<match kubernetes.**>
@type google_cloud
# Try to detect JSON formatted log entries.
detect_json true
# Collect metrics in Prometheus registry about plugin activity.
enable_monitoring true
monitoring_type prometheus
# Set the buffer type to file to improve the reliability and reduce the memory consumption
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.containers.buffer
# Set queue_full action to block because we want to pause gracefully
# when the load exceeds the limits instead of throwing an exception
buffer_queue_full_action block
# Set the chunk limit conservatively to avoid exceeding the recommended
# chunk size of 5MB per write request.
buffer_chunk_limit 1M
# Cap the combined memory usage of this buffer and the one below to
# 1MiB/chunk * (6 + 2) chunks = 8 MiB
buffer_queue_limit 6
# Never wait more than 5 seconds before flushing logs in the non-error case.
flush_interval 5s
# Never wait longer than 30 seconds between retries.
max_retry_wait 30
# Disable the limit on the number of retries (retry forever).
disable_retry_limit
# Use multiple threads for processing.
num_threads 2
</match>
# Keep a smaller buffer here since these logs are less important than the user's
# container logs.
<match **>
@type google_cloud
detect_json true
enable_monitoring true
monitoring_type prometheus
detect_subservice false
buffer_type file
buffer_path /var/log/fluentd-buffers/kubernetes.system.buffer
buffer_queue_full_action block
buffer_chunk_limit 1M
buffer_queue_limit 2
flush_interval 5s
max_retry_wait 30
disable_retry_limit
num_threads 2
</match>
metadata:
name: fluentd-gcp-config-v1.1.3
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
================= /etc/kubernetes/addons/fluentd-gcp/fluentd-gcp-ds.yaml ==================
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: fluentd-gcp-v2.0.9
namespace: kube-system
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
version: v2.0.9
spec:
updateStrategy:
type: RollingUpdate
template:
metadata:
labels:
k8s-app: fluentd-gcp
kubernetes.io/cluster-service: "true"
version: v2.0.9
# This annotation ensures that fluentd does not get evicted if the node
# supports the critical pod annotation based priority scheme.
# Note that this does not guarantee admission on the nodes (#40573).
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
dnsPolicy: Default
hostNetwork: true
containers:
- name: fluentd-gcp
image: gcr.io/google-containers/fluentd-gcp:2.0.9
env:
- name: FLUENTD_ARGS
value: --no-supervisor -q
resources:
limits:
memory: 300Mi
requests:
cpu: 100m
memory: 200Mi
volumeMounts:
- name: varlog
mountPath: /var/log
- name: varlibdockercontainers
mountPath: /var/lib/docker/containers
readOnly: true
- name: libsystemddir
mountPath: /host/lib
readOnly: true
- name: config-volume
mountPath: /etc/fluent/config.d
# The liveness probe is aimed at helping in situations where fluentd
# silently hangs for no apparent reason until it is manually restarted.
# The idea of this probe is that if fluentd has not queued or
# flushed chunks for 5 minutes, something is not right. If
# you change the fluentd configuration to reduce the amount of
# logs fluentd collects, consider changing the threshold or turning
# the liveness probe off completely.
livenessProbe:
initialDelaySeconds: 600
periodSeconds: 60
exec:
command:
- '/bin/sh'
- '-c'
- >
LIVENESS_THRESHOLD_SECONDS=${LIVENESS_THRESHOLD_SECONDS:-300};
STUCK_THRESHOLD_SECONDS=${STUCK_THRESHOLD_SECONDS:-900};
if [ ! -e /var/log/fluentd-buffers ];
then
exit 1;
fi;
LAST_MODIFIED_DATE=`stat /var/log/fluentd-buffers | grep Modify | sed -r "s/Modify: (.*)/\1/"`;
LAST_MODIFIED_TIMESTAMP=`date -d "$LAST_MODIFIED_DATE" +%s`;
if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $STUCK_THRESHOLD_SECONDS` ];
then
rm -rf /var/log/fluentd-buffers;
exit 1;
fi;
if [ `date +%s` -gt `expr $LAST_MODIFIED_TIMESTAMP + $LIVENESS_THRESHOLD_SECONDS` ];
then
exit 1;
fi;
- name: prometheus-to-sd-exporter
image: gcr.io/google-containers/prometheus-to-sd:v0.1.3
command:
- /monitor
- --component=fluentd
- --target-port=31337
- --stackdriver-prefix=container.googleapis.com/internal/addons
- --whitelisted-metrics=stackdriver_successful_requests_count,stackdriver_failed_requests_count,stackdriver_ingested_entries_count,stackdriver_dropped_entries_count
volumeMounts:
- name: ssl-certs
mountPath: /etc/ssl/certs
nodeSelector:
beta.kubernetes.io/fluentd-ds-ready: "true"
tolerations:
- key: "node.alpha.kubernetes.io/ismaster"
effect: "NoSchedule"
- operator: "Exists"
effect: "NoExecute"
# TODO: remove this toleration once #44445 is properly fixed.
- operator: "Exists"
effect: "NoSchedule"
terminationGracePeriodSeconds: 30
volumes:
- name: varlog
hostPath:
path: /var/log
- name: varlibdockercontainers
hostPath:
path: /var/lib/docker/containers
- name: libsystemddir
hostPath:
path: /usr/lib64
- name: config-volume
configMap:
name: fluentd-gcp-config-v1.1.3
- name: ssl-certs
hostPath:
path: /etc/ssl/certs
================= /etc/kubernetes/addons/node-problem-detector/standalone/npd-binding.yaml ==================
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: npd-binding
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node-problem-detector
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: system:node-problem-detector
================= /etc/kubernetes/addons/rbac/kubelet-api-admin-role.yaml ==================
# This role allows full access to the kubelet API
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kubelet-api-admin
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- ""
resources:
- nodes/proxy
- nodes/log
- nodes/stats
- nodes/metrics
- nodes/spec
verbs:
- "*"
================= /etc/kubernetes/addons/rbac/kubelet-binding.yaml ==================
# The GKE environments don't have kubelets with certificates that
# identify the system:nodes group. They use the kubelet identity.
# TODO: remove this once new nodes are granted individual identities and the
# NodeAuthorizer is enabled.
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kubelet-cluster-admin
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: system:node
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet
================= /etc/kubernetes/addons/rbac/kubelet-certificate-management.yaml ==================
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: gce:beta:kubelet-certificate-bootstrap
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gce:beta:kubelet-certificate-bootstrap
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kubelet
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: gce:beta:kubelet-certificate-rotation
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: gce:beta:kubelet-certificate-rotation
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: Group
name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: gce:beta:kubelet-certificate-bootstrap
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- "certificates.k8s.io"
resources:
- certificatesigningrequests/nodeclient
verbs:
- "create"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: gce:beta:kubelet-certificate-rotation
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups:
- "certificates.k8s.io"
resources:
- certificatesigningrequests/selfnodeclient
- certificatesigningrequests/selfnodeserver
verbs:
- "create"
================= /etc/kubernetes/addons/rbac/kube-apiserver-kubelet-api-admin-binding.yaml ==================
# This binding gives the kube-apiserver user full access to the kubelet API
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: kube-apiserver-kubelet-api-admin
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubelet-api-admin
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: kube-apiserver
================= /etc/kubernetes/addons/storage-class/gce/default.yaml ==================
apiVersion: storage.k8s.io/v1beta1
kind: StorageClass
metadata:
name: standard
annotations:
storageclass.beta.kubernetes.io/is-default-class: "true"
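# The DefaultStorageClass admission plugin assigns this class to
# PersistentVolumeClaims that do not request a class explicitly.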
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: EnsureExists
provisioner: kubernetes.io/gce-pd
parameters:
type: pd-standard
================= /etc/kubernetes/admission-controls/limit-range/limit-range.yaml ==================
apiVersion: "v1"
kind: "LimitRange"
metadata:
name: "limits"
namespace: default
spec:
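# Containers created in the "default" namespace without an explicit CPU
# request are given a 100m request by the LimitRanger admission plugin.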
limits:
- type: "Container"
defaultRequest:
cpu: "100m"
================= /etc/kubernetes/manifests/etcd.manifest ==================
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "eu.gcr.io/google_containers/etcd:3.0.17-gke.2",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd.log 2>&1; fi; /usr/local/bin/etcd --name etcd-gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm --listen-peer-urls http://gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm:2380 --initial-advertise-peer-urls http://gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm:2380 --advertise-client-urls http://127.0.0.1:2379 --listen-client-urls http://127.0.0.1:2379 --quota-backend-bytes=4294967296 --data-dir /var/etcd/data --initial-cluster-state new --initial-cluster etcd-gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm=http://gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm:2380 1>>/var/log/etcd.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "etcd3"
},
{ "name": "TARGET_VERSION",
"value": "3.0.17"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data"
}
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 2379,
"path": "/health"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"ports": [
{ "name": "serverport",
"containerPort": 2380,
"hostPort": 2380
},
{ "name": "clientport",
"containerPort": 2379,
"hostPort": 2379
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{ "name": "varlogetcd",
"mountPath": "/var/log/etcd.log",
"readOnly": false
},
{ "name": "etc",
"mountPath": "/etc/srv/kubernetes",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/disks/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd.log"}
},
{ "name": "etc",
"hostPath": {
"path": "/etc/srv/kubernetes"}
}
]
}}
================= /etc/kubernetes/manifests/etcd-empty-dir-cleanup.yaml ==================
apiVersion: v1
kind: Pod
metadata:
name: etcd-empty-dir-cleanup
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: etcd-empty-dir-cleanup
spec:
hostNetwork: true
dnsPolicy: Default
containers:
- name: etcd-empty-dir-cleanup
image: gcr.io/google-containers/etcd-empty-dir-cleanup:3.0.14.0
================= /etc/kubernetes/manifests/etcd-events.manifest ==================
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"etcd-server-events",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "etcd-container",
"image": "eu.gcr.io/google_containers/etcd:3.0.17-gke.2",
"resources": {
"requests": {
"cpu": "100m"
}
},
"command": [
"/bin/sh",
"-c",
"if [ -e /usr/local/bin/migrate-if-needed.sh ]; then /usr/local/bin/migrate-if-needed.sh 1>>/var/log/etcd-events.log 2>&1; fi; /usr/local/bin/etcd --name etcd-gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm --listen-peer-urls http://gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm:2381 --initial-advertise-peer-urls http://gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm:2381 --advertise-client-urls http://127.0.0.1:4002 --listen-client-urls http://127.0.0.1:4002 --quota-backend-bytes=4294967296 --data-dir /var/etcd/data-events --initial-cluster-state new --initial-cluster etcd-gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm=http://gke-970079080726-7a585d8e886f019430b8-8778-21b9-vm:2381 1>>/var/log/etcd-events.log 2>&1"
],
"env": [
{ "name": "TARGET_STORAGE",
"value": "etcd3"
},
{ "name": "TARGET_VERSION",
"value": "3.0.17"
},
{ "name": "DATA_DIRECTORY",
"value": "/var/etcd/data-events"
}
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 4002,
"path": "/health"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"ports": [
{ "name": "serverport",
"containerPort": 2381,
"hostPort": 2381
},
{ "name": "clientport",
"containerPort": 4002,
"hostPort": 4002
}
],
"volumeMounts": [
{ "name": "varetcd",
"mountPath": "/var/etcd",
"readOnly": false
},
{ "name": "varlogetcd",
"mountPath": "/var/log/etcd-events.log",
"readOnly": false
},
{ "name": "etc",
"mountPath": "/etc/srv/kubernetes",
"readOnly": false
}
]
}
],
"volumes":[
{ "name": "varetcd",
"hostPath": {
"path": "/mnt/disks/master-pd/var/etcd"}
},
{ "name": "varlogetcd",
"hostPath": {
"path": "/var/log/etcd-events.log"}
},
{ "name": "etc",
"hostPath": {
"path": "/etc/srv/kubernetes"}
}
]
}}
================= /etc/kubernetes/manifests/glbc.manifest ==================
apiVersion: v1
kind: Pod
metadata:
name: l7-lb-controller-v0.9.6
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: glbc
version: v0.9.6
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "GLBC"
spec:
terminationGracePeriodSeconds: 600
hostNetwork: true
containers:
- image: gcr.io/google_containers/glbc:0.9.6
livenessProbe:
httpGet:
path: /healthz
port: 8086
scheme: HTTP
initialDelaySeconds: 30
# healthz reaches out to GCE
periodSeconds: 30
timeoutSeconds: 15
successThreshold: 1
failureThreshold: 5
name: l7-lb-controller
volumeMounts:
- mountPath: /etc/gce.conf
name: cloudconfig
readOnly: true
- mountPath: /var/log/glbc.log
name: logfile
readOnly: false
resources:
# Request is set to accommodate this pod alongside the other
# master components on a single core master.
# TODO: Make resource requirements depend on the size of the cluster
requests:
cpu: 10m
memory: 50Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- '/glbc --verbose=true --apiserver-host=http://localhost:8080 --default-backend-service=kube-system/default-http-backend --sync-period=600s --running-in-cluster=false --use-real-cloud=true --config-file-path=/etc/gce.conf --healthz-port=8086 1>>/var/log/glbc.log 2>&1'
volumes:
- hostPath:
path: /etc/gce.conf
name: cloudconfig
- hostPath:
path: /var/log/glbc.log
name: logfile
================= /etc/kubernetes/manifests/kube-addon-manager.yaml ==================
apiVersion: v1
kind: Pod
metadata:
name: kube-addon-manager
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
component: kube-addon-manager
spec:
hostNetwork: true
containers:
- name: kube-addon-manager
# When updating version also bump it in:
# - cluster/images/hyperkube/static-pods/addon-manager-singlenode.json
# - cluster/images/hyperkube/static-pods/addon-manager-multinode.json
# - test/kubemark/resources/manifests/kube-addon-manager.yaml
image: gcr.io/google-containers/kube-addon-manager:v6.4-beta.2
command:
- /bin/bash
- -c
- /opt/kube-addons.sh 1>>/var/log/kube-addon-manager.log 2>&1
resources:
requests:
cpu: 5m
memory: 50Mi
volumeMounts:
- mountPath: /etc/kubernetes/
name: addons
readOnly: true
- mountPath: /var/log
name: varlog
readOnly: false
volumes:
- hostPath:
path: /etc/kubernetes/
name: addons
- hostPath:
path: /var/log
name: varlog
================= /etc/kubernetes/manifests/kube-apiserver.manifest ==================
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-apiserver",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-apiserver"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-apiserver",
"image": "gcr.io/google_containers/kube-apiserver:v1.7.8-gke.0",
"resources": {
"requests": {
"cpu": "250m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-apiserver --v=2 --cloud-config=/etc/gce.conf --address=127.0.0.1 --allow-privileged=true --cloud-provider=gce --client-ca-file=/etc/srv/kubernetes/pki/ca-certificates.crt --etcd-servers=http://127.0.0.1:2379 --etcd-servers-overrides=/events#http://127.0.0.1:4002 --secure-port=443 --tls-cert-file=/etc/srv/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/srv/kubernetes/pki/apiserver.key --requestheader-client-ca-file=/etc/srv/kubernetes/pki/aggr_ca.crt --requestheader-allowed-names=aggregator --requestheader-extra-headers-prefix=X-Remote-Extra- --requestheader-group-headers=X-Remote-Group --requestheader-username-headers=X-Remote-User --proxy-client-cert-file=/etc/srv/kubernetes/pki/proxy_client.crt --proxy-client-key-file=/etc/srv/kubernetes/pki/proxy_client.key --enable-aggregator-routing=true --tls-cert-file=/etc/srv/kubernetes/pki/apiserver.crt --tls-private-key-file=/etc/srv/kubernetes/pki/apiserver.key --kubelet-client-certificate=/etc/srv/kubernetes/pki/apiserver-client.crt --kubelet-client-key=/etc/srv/kubernetes/pki/apiserver-client.key --service-account-key-file=/etc/srv/kubernetes/pki/serviceaccount.crt --token-auth-file=/etc/srv/kubernetes/known_tokens.csv --basic-auth-file=/etc/srv/kubernetes/basic_auth.csv --storage-backend=etcd3 --storage-media-type=application/vnd.kubernetes.protobuf --target-ram-mb=180 --service-cluster-ip-range=10.59.240.0/20 --admission-control=Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,ResourceQuota --runtime-config=api/all=false,api/v1=true,autoscaling/v1=true,batch/v1=true,extensions/v1beta1=true,storage.k8s.io/v1beta1=true,apps/v1beta1=true,policy/v1beta1=true,authorization.k8s.io/v1beta1=true,rbac.authorization.k8s.io/v1beta1=true,storage.k8s.io/v1=true,certificates.k8s.io/v1beta1=true,extensions/v1beta1/podsecuritypolicies=false,authorization.k8s.io/v1=true,networking.k8s.io/v1=true,authentication.k8s.io/v1beta1=true,authentication.k8s.io/v1=true --advertise-address=35.189.81.22 --ssh-user=gke-7a585d8e886f019430b8 --ssh-keyfile=/etc/srv/sshproxy/.sshkeyfile --authentication-token-webhook-config-file=/etc/gcp_authn.config --authorization-policy-file=/etc/srv/kubernetes/abac-authz-policy.jsonl --authorization-webhook-config-file=/etc/gcp_authz.config --authorization-mode=Node,RBAC,ABAC,Webhook --allow-privileged=true 1>>/var/log/kube-apiserver.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 8080,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"ports":[
{ "name": "https",
"containerPort": 443,
"hostPort": 443},{
"name": "local",
"containerPort": 8080,
"hostPort": 8080}
],
"volumeMounts": [
{"name": "cloudconfigmount","mountPath": "/etc/gce.conf", "readOnly": true},
{"name": "webhookconfigmount","mountPath": "/etc/gcp_authz.config", "readOnly": false},
{"name": "webhookauthnconfigmount","mountPath": "/etc/gcp_authn.config", "readOnly": false},
{ "name": "srvkube",
"mountPath": "/etc/srv/kubernetes",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-apiserver.log",
"readOnly": false},
{ "name": "auditlogfile",
"mountPath": "/var/log/kube-apiserver-audit.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/srv/pki",
"readOnly": true},
{ "name": "srvsshproxy",
"mountPath": "/etc/srv/sshproxy",
"readOnly": false}
]
}
],
"volumes":[
{"name": "cloudconfigmount","hostPath": {"path": "/etc/gce.conf"}},
{"name": "webhookconfigmount","hostPath": {"path": "/etc/gcp_authz.config"}},
{"name": "webhookauthnconfigmount","hostPath": {"path": "/etc/gcp_authn.config"}},
{ "name": "srvkube",
"hostPath": {
"path": "/etc/srv/kubernetes"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-apiserver.log"}
},
{ "name": "auditlogfile",
"hostPath": {
"path": "/var/log/kube-apiserver-audit.log"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/srv/pki"}
},
{ "name": "srvsshproxy",
"hostPath": {
"path": "/etc/srv/sshproxy"}
}
]
}}
================= /etc/kubernetes/manifests/kube-controller-manager.manifest ==================
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-controller-manager",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-controller-manager"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-controller-manager",
"image": "gcr.io/google_containers/kube-controller-manager:v1.7.8-gke.0",
"resources": {
"requests": {
"cpu": "200m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-controller-manager --v=2 --cloud-config=/etc/gce.conf --use-service-account-credentials --cloud-provider=gce --kubeconfig=/etc/srv/kubernetes/kube-controller-manager/kubeconfig --root-ca-file=/etc/srv/kubernetes/pki/ca-certificates.crt --service-account-private-key-file=/etc/srv/kubernetes/pki/serviceaccount.key --cluster-name=gke-gke-dawnchen-176-7a585d8e --cluster-cidr=10.56.0.0/14 --service-cluster-ip-range=10.59.240.0/20 --allocate-node-cidrs=true --terminated-pod-gc-threshold=1000 1>>/var/log/kube-controller-manager.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10252,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{"name": "cloudconfigmount","mountPath": "/etc/gce.conf", "readOnly": true},
{ "name": "srvkube",
"mountPath": "/etc/srv/kubernetes",
"readOnly": true},
{ "name": "logfile",
"mountPath": "/var/log/kube-controller-manager.log",
"readOnly": false},
{ "name": "etcssl",
"mountPath": "/etc/ssl",
"readOnly": true},
{ "name": "usrsharecacerts",
"mountPath": "/usr/share/ca-certificates",
"readOnly": true},
{ "name": "varssl",
"mountPath": "/var/ssl",
"readOnly": true},
{ "name": "etcopenssl",
"mountPath": "/etc/openssl",
"readOnly": true},
{ "name": "etcpki",
"mountPath": "/etc/pki",
"readOnly": true}
]
}
],
"volumes":[
{"name": "cloudconfigmount","hostPath": {"path": "/etc/gce.conf"}},
{ "name": "srvkube",
"hostPath": {
"path": "/etc/srv/kubernetes"}
},
{ "name": "logfile",
"hostPath": {
"path": "/var/log/kube-controller-manager.log"}
},
{ "name": "etcssl",
"hostPath": {
"path": "/etc/ssl"}
},
{ "name": "usrsharecacerts",
"hostPath": {
"path": "/usr/share/ca-certificates"}
},
{ "name": "varssl",
"hostPath": {
"path": "/var/ssl"}
},
{ "name": "etcopenssl",
"hostPath": {
"path": "/etc/openssl"}
},
{ "name": "etcpki",
"hostPath": {
"path": "/etc/pki"}
}
]
}}
================= /etc/kubernetes/manifests/kube-scheduler.manifest ==================
{
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name":"kube-scheduler",
"namespace": "kube-system",
"annotations": {
"scheduler.alpha.kubernetes.io/critical-pod": ""
},
"labels": {
"tier": "control-plane",
"component": "kube-scheduler"
}
},
"spec":{
"hostNetwork": true,
"containers":[
{
"name": "kube-scheduler",
"image": "gcr.io/google_containers/kube-scheduler:v1.7.8-gke.0",
"resources": {
"requests": {
"cpu": "75m"
}
},
"command": [
"/bin/sh",
"-c",
"/usr/local/bin/kube-scheduler --v=2 --kubeconfig=/etc/srv/kubernetes/kube-scheduler/kubeconfig 1>>/var/log/kube-scheduler.log 2>&1"
],
"livenessProbe": {
"httpGet": {
"host": "127.0.0.1",
"port": 10251,
"path": "/healthz"
},
"initialDelaySeconds": 15,
"timeoutSeconds": 15
},
"volumeMounts": [
{
"name": "logfile",
"mountPath": "/var/log/kube-scheduler.log",
"readOnly": false
},
{
"name": "srvkube",
"mountPath": "/etc/srv/kubernetes",
"readOnly": true
}
]
}
],
"volumes":[
{
"name": "srvkube",
"hostPath": {"path": "/etc/srv/kubernetes"}
},
{
"name": "logfile",
"hostPath": {"path": "/var/log/kube-scheduler.log"}
}
]
}}
================= /etc/kubernetes/manifests/rescheduler.manifest ==================
apiVersion: v1
kind: Pod
metadata:
name: rescheduler-v0.3.1
namespace: kube-system
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
labels:
k8s-app: rescheduler
version: v0.3.1
kubernetes.io/cluster-service: "true"
kubernetes.io/name: "Rescheduler"
spec:
hostNetwork: true
containers:
- image: gcr.io/google-containers/rescheduler:v0.3.1
name: rescheduler
volumeMounts:
- mountPath: /var/log/rescheduler.log
name: logfile
readOnly: false
# TODO: Make resource requirements depend on the size of the cluster
resources:
requests:
cpu: 10m
memory: 100Mi
command:
# TODO: split this out into args when we no longer need to pipe stdout to a file #6428
- sh
- -c
- '/rescheduler --running-in-cluster=false 1>>/var/log/rescheduler.log 2>&1'
volumes:
- hostPath:
path: /var/log/rescheduler.log
name: logfile