Vagrant provision script and Upstart service files to install and run Kubernetes, along with a kubeconfig and the Kubernetes Dashboard YAML file.
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: http://44.0.0.103:8888
  name: vagrant
contexts:
- context:
    cluster: vagrant
    namespace: local
    user: ""
  name: local
current-context: local
kind: Config
preferences: {}
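A quick way to exercise this kubeconfig from the host or master VM, assuming it has been saved to a file (the ./kubeconfig path below is illustrative, not part of the provision scripts):

# Illustrative path; point this at wherever the config above is saved
export KUBECONFIG=./kubeconfig
kubectl config use-context local
kubectl cluster-info
kubectl get nodes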
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Configuration to deploy release version of the Dashboard UI.
#
# Example usage: kubectl create -f <this_file>
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
  labels:
    app: kubernetes-dashboard
    version: v1.1.0
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kubernetes-dashboard
  template:
    metadata:
      labels:
        app: kubernetes-dashboard
    spec:
      containers:
      - name: kubernetes-dashboard
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.1.0
        imagePullPolicy: Always
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Manually specify the Kubernetes API server host (enabled here; if
          # omitted, Dashboard will attempt to auto-discover the API server and
          # connect to it).
          - --apiserver-host=http://44.0.0.103:8888
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
---
kind: Service
apiVersion: v1
metadata:
  labels:
    app: kubernetes-dashboard
  name: kubernetes-dashboard
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9090
  selector:
    app: kubernetes-dashboard
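The manifest's own usage note applies directly; since the Service is type NodePort, the allocated port has to be looked up after creation (the filename below is an assumption about how the manifest above is saved):

# Assumes the manifest above was saved as kubernetes-dashboard.yaml
kubectl create -f kubernetes-dashboard.yaml
# Find the NodePort assigned to the dashboard Service
kubectl --namespace=kube-system get service kubernetes-dashboard
# Then browse to http://<node-ip>:<node-port>/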
description "Kubernetes API Server Service" | |
author "Matthew Mihok" | |
env KUBE_HOME=/opt/kubernetes-1.3.0/server/kubernetes/server/bin | |
env LOG_HOME=/var/log/kubernetes | |
# Make sure network and fs is up, and start in runlevels 2-5 | |
start on (net-device-up | |
and local-filesystems | |
and runlevel [2345]) | |
# Stop in runlevels 0,1 and 6 | |
stop on runlevel [016] | |
# automatically respawn, but if its respwaning too fast (5 times in 60 seconds, don't do that) | |
respawn | |
respawn limit 5 60 | |
# make sure node is there, the code directory is there | |
pre-start script | |
test -x $KUBE_HOME/kube-apiserver || { stop; exit 0; } | |
end script | |
# cd to code path and run node, with the right switches | |
script | |
exec kube-apiserver \ | |
--advertise-address="44.0.0.103" \ | |
--storage-backend="etcd3" \ | |
--service-cluster-ip-range="107.0.0.0/16" \ | |
--logtostderr=true \ | |
--etcd-servers="http://127.0.0.1:2379" \ | |
--insecure-bind-address="44.0.0.103" \ | |
--insecure-port=8888 \ | |
--kubelet-https=false >> $LOG_HOME/kube-apiserver.log 2>&1 | |
end script |
#!/bin/bash
set -x

# Make a symlink so we can access the binary
ln -s /opt/kubernetes-1.3.0/server/kubernetes/server/bin/kube-apiserver /usr/local/bin/kube-apiserver

# Copy the Upstart job into the machine and start it
cp /vagrant/kube-apiserver.conf /etc/init/kube-apiserver.conf
start kube-apiserver
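Once the Upstart job is running, the API server answers on the insecure port configured above, which makes a quick smoke test possible:

# Upstart job status
status kube-apiserver
# The insecure port (8888) serves /healthz without authentication
curl http://44.0.0.103:8888/healthz
# Log output lands where the job redirects it
tail -n 20 /var/log/kubernetes/kube-apiserver.log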
description "Kubernetes Controller Manager Service" | |
author "Matthew Mihok" | |
env KUBE_HOME=/opt/kubernetes-1.3.0/server/kubernetes/server/bin | |
env LOG_HOME=/var/log/kubernetes | |
# Make sure network and fs is up, and start in runlevels 2-5 | |
start on (net-device-up | |
and local-filesystems | |
and runlevel [2345]) | |
# Stop in runlevels 0,1 and 6 | |
stop on runlevel [016] | |
# automatically respawn, but if its respwaning too fast (5 times in 60 seconds, don't do that) | |
respawn | |
respawn limit 5 60 | |
# make sure node is there, the code directory is there | |
pre-start script | |
test -x $KUBE_HOME/kube-controller-manager || { stop; exit 0; } | |
end script | |
# cd to code path and run node, with the right switches | |
script | |
exec kube-controller-manager \ | |
--cluster-cidr="107.0.0.0/16" \ | |
--cluster-name="vagrant" \ | |
--master="http://44.0.0.103:8888" \ | |
--port=8890 \ | |
--service-cluster-ip-range="107.0.0.0/16" \ | |
--logtostderr=true >> $LOG_HOME/kube-controller-manager.log 2>&1 | |
end script |
#!/bin/bash
set -x

# Make a symlink so we can access the binary
ln -s /opt/kubernetes-1.3.0/server/kubernetes/server/bin/kube-controller-manager /usr/local/bin/kube-controller-manager

# Copy the Upstart job into the machine and start it
cp /vagrant/kube-controller-manager.conf /etc/init/kube-controller-manager.conf
start kube-controller-manager
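The controller manager runs on a non-default port here (--port=8890), so a direct health probe and the log file are the quickest checks (a sketch, assuming it is reachable locally on that port as configured above):

status kube-controller-manager
# /healthz is served on the port passed above; note this is not the default (10252),
# so `kubectl get componentstatuses` may not reflect this instance
curl http://127.0.0.1:8890/healthz
tail -n 20 /var/log/kubernetes/kube-controller-manager.log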
description "Kubernetes Proxy Service" | |
author "Matthew Mihok" | |
env KUBE_HOME=/opt/kubernetes-1.3.0/server/kubernetes/server/bin | |
env LOG_HOME=/var/log/kubernetes | |
# Make sure network and fs is up, and start in runlevels 2-5 | |
start on (net-device-up | |
and local-filesystems | |
and runlevel [2345]) | |
# Stop in runlevels 0,1 and 6 | |
stop on runlevel [016] | |
# automatically respawn, but if its respwaning too fast (5 times in 60 seconds, don't do that) | |
respawn | |
respawn limit 5 60 | |
# make sure node is there, the code directory is there | |
pre-start script | |
test -x $KUBE_HOME/kube-proxy || { stop; exit 0; } | |
end script | |
# cd to code path and run node, with the right switches | |
script | |
exec kube-proxy \ | |
--master="http://44.0.0.103:8888" \ | |
--proxy-mode=iptables \ | |
--logtostderr=true >> $LOG_HOME/kube-proxy.log 2>&1 | |
end script |
#!/bin/bash
set -x

# Make a symlink so we can access the binary
ln -s /opt/kubernetes-1.3.0/server/kubernetes/server/bin/kube-proxy /usr/local/bin/kube-proxy

# Copy the Upstart job into the machine and start it
cp /vagrant/kube-proxy.conf /etc/init/kube-proxy.conf
start kube-proxy
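With --proxy-mode=iptables, kube-proxy programs Service rules into the NAT table, so the rules themselves are a useful check (a sketch; the chain name below is the one kube-proxy creates in iptables mode):

status kube-proxy
# Service virtual IPs are wired through the KUBE-SERVICES chain
sudo iptables -t nat -L KUBE-SERVICES -n | head -n 20
tail -n 20 /var/log/kubernetes/kube-proxy.log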
description "Kubernetes Scheduler Service" | |
author "Matthew Mihok" | |
env KUBE_HOME=/opt/kubernetes-1.3.0/server/kubernetes/server/bin | |
env LOG_HOME=/var/log/kubernetes | |
# Make sure network and fs is up, and start in runlevels 2-5 | |
start on (net-device-up | |
and local-filesystems | |
and runlevel [2345]) | |
# Stop in runlevels 0,1 and 6 | |
stop on runlevel [016] | |
# automatically respawn, but if its respwaning too fast (5 times in 60 seconds, don't do that) | |
respawn | |
respawn limit 5 60 | |
# make sure node is there, the code directory is there | |
pre-start script | |
test -x $KUBE_HOME/kube-scheduler || { stop; exit 0; } | |
end script | |
# cd to code path and run node, with the right switches | |
script | |
exec kube-scheduler \ | |
--master="http://44.0.0.103:8888" \ | |
--logtostderr=true >> $LOG_HOME/kube-scheduler.log 2>&1 | |
end script |
#!/bin/bash
set -x

# Make a symlink so we can access the binary
ln -s /opt/kubernetes-1.3.0/server/kubernetes/server/bin/kube-scheduler /usr/local/bin/kube-scheduler

# Copy the Upstart job into the machine and start it
cp /vagrant/kube-scheduler.conf /etc/init/kube-scheduler.conf
start kube-scheduler
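The scheduler keeps its default health port here (no --port override, so 10251 is assumed), which makes a direct probe possible alongside the log file:

status kube-scheduler
# Assumes the default scheduler port of 10251
curl http://127.0.0.1:10251/healthz
tail -n 20 /var/log/kubernetes/kube-scheduler.log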
description "Kubernetes Kublet Service" | |
author "Matthew Mihok" | |
env KUBE_HOME=/opt/kubernetes-1.3.0/server/kubernetes/server/bin | |
env LOG_HOME=/var/log/kubernetes | |
# Make sure network and fs is up, and start in runlevels 2-5 | |
start on (net-device-up | |
and local-filesystems | |
and runlevel [2345]) | |
# Stop in runlevels 0,1 and 6 | |
stop on runlevel [016] | |
# automatically respawn, but if its respwaning too fast (5 times in 60 seconds, don't do that) | |
respawn | |
respawn limit 5 60 | |
# make sure node is there, the code directory is there | |
pre-start script | |
test -x $KUBE_HOME/kubelet || { stop; exit 0; } | |
end script | |
# cd to code path and run node, with the right switches | |
script | |
exec kubelet \ | |
--api-servers="44.0.0.103:8888" \ | |
--experimental-flannel-overlay=true \ | |
--logtostderr=true >> $LOG_HOME/kubelet.log 2>&1 | |
end script |
#!/bin/bash
set -x

# Make a symlink so we can access the binary
ln -s /opt/kubernetes-1.3.0/server/kubernetes/server/bin/kubelet /usr/local/bin/kubelet

# Copy the Upstart job into the machine and start it
cp /vagrant/kubelet.conf /etc/init/kubelet.conf
start kubelet
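When the kubelet starts successfully, it registers the node with the API server given in --api-servers, so the node list is the simplest check (assuming kubectl points at the kubeconfig above):

status kubelet
# The VM should appear here once registration completes
kubectl get nodes
tail -n 20 /var/log/kubernetes/kubelet.log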
#!/bin/bash
set -x

# Kubernetes
cd /opt/

# Download the release
curl -L https://github.com/kubernetes/kubernetes/releases/download/v1.3.0/kubernetes.tar.gz -o kubernetes-1.3.0.tar.gz

# Create the directory the binaries will live in
mkdir /opt/kubernetes-1.3.0

# Extract the contents of the release
tar -zxvf kubernetes-1.3.0.tar.gz -C kubernetes-1.3.0 --strip-components=1

# Extract the server tarball nested inside the release (where the binaries actually reside)
cd /opt/kubernetes-1.3.0/server/
tar -zxvf kubernetes-server-linux-amd64.tar.gz

# Make a symlink for the kubectl binary so we can interact with the cluster
ln -s /opt/kubernetes-1.3.0/server/kubernetes/server/bin/kubectl /usr/local/bin/kubectl

# Create the logging directory used by the Kubernetes Upstart jobs
mkdir /var/log/kubernetes
chown vagrant:vagrant /var/log/kubernetes
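A quick way to confirm the download, unpack and symlink steps worked before the service jobs come up (paths follow the script above):

# The server binaries should now be in place
ls -l /opt/kubernetes-1.3.0/server/kubernetes/server/bin/ | head
ls -l /usr/local/bin/kubectl
kubectl version --client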