Archive: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler.zip
creating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/dashboard.template.yaml
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/heapster.template.yaml
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/install_addon.template.sh
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/kubeconfig.template.yaml
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/kubedns.template.yaml
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/kubelet-wrapper.template.sh
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/svc.yml
creating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/kubernetes.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/scheduler-master.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/executor-master.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mesos-1.4.0-rc1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/common-master.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-guava-2.6.3.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-jdk8-2.6.3.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-jsr310-2.6.3.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-dataformat-yaml-2.6.3.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-protobuf-0.9.3.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/annotations-3.0.1u2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-collections-3.2.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-io-2.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/antlr4-runtime-4.5.1-1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.ws.rs-api-2.0.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-framework-2.9.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-recipes-2.9.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-test-2.9.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/httpclient-4.5.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/fluent-hc-4.5.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-lang3-3.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/protobuf-java-format-1.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/json-20160212.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/diffutils-1.3.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-container-jetty-http-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-container-servlet-core-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-json-jackson-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-multipart-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mimepull-1.9.6.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-servlet-9.2.3.v20140905.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hibernate-validator-5.3.2.Final.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.el-api-2.2.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.el-2.2.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mesos-http-adapter-0.4.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/java-jwt-3.2.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/bcprov-jdk15on-1.57.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/bcpkix-jdk15on-1.57.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-core-3.2.5.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-servlet-3.2.5.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-servlets-3.2.5.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_dropwizard-0.0.26.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_servlet-0.0.26.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics3-statsd-4.2.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/slf4j-api-1.7.25.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-core-2.8.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-slf4j-impl-2.8.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/compiler-0.9.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-codec-1.10.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/snakeyaml-1.15.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-annotations-2.6.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-client-2.9.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/zookeeper-3.4.6.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javassist-3.18.1-GA.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-math-2.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/httpcore-4.4.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-logging-1.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.inject-2.4.0-b34.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-continuation-9.1.1.v20140108.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-common-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-server-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-entity-filtering-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-jaxrs-base-2.5.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-jaxrs-json-provider-2.5.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-security-9.2.3.v20140905.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/validation-api-1.1.0.Final.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jboss-logging-3.3.0.Final.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/classmate-1.3.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/edu-umd-cs-findbugs-annotations-1.3.2-201002241900.nbm
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-over-slf4j-1.7.10.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jcl-over-slf4j-1.7.10.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/google-http-client-1.20.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-healthchecks-3.2.5.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-json-3.2.5.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-jvm-3.2.5.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/profiler-1.0.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient-0.0.26.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_common-0.0.26.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-statsd-common-4.2.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-api-2.8.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-1.2.16.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jline-0.9.94.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/netty-3.7.0.Final.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.servlet-api-3.1.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.annotation-api-1.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-guava-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-api-2.4.0-b34.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-locator-2.4.0-b34.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/osgi-resource-locator-1.0.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-client-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-jaxb-2.23.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-module-jaxb-annotations-2.5.4.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/annotations-1.3.2.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/joda-time-2.9.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-utils-2.4.0-b34.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/aopalliance-repackaged-2.4.0-b34.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.inject-1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/protobuf-java-3.3.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-databind-2.6.6.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/guava-18.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jcip-annotations-1.0.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-server-9.2.3.v20140905.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-core-2.6.6.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jsr305-3.0.1.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-http-9.2.3.v20140905.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-io-9.2.3.v20140905.jar
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-util-9.2.3.v20140905.jar
creating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/bin/
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/bin/kubernetes
inflating: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/bin/kubernetes.bat
Executing pre-exec command '{"arguments":["mesos-containerizer","mount","--help=false","--operation=make-rslave","--path=\/"],"shell":false,"value":"\/opt\/mesosphere\/active\/mesos\/libexec\/mesos\/mesos-containerizer"}'
Executing pre-exec command '{"shell":true,"value":"mount -n -t proc proc \/proc -o nosuid,noexec,nodev"}'
INFO 2017-12-06 17:43:45,542 [main] com.mesosphere.sdk.specification.yaml.RawServiceSpec$Builder:build(70): Rendered ServiceSpec from /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/./kubernetes-scheduler/svc.yml:
Missing template values: [FRAMEWORK_NAME, KUBERNETES_VERSION@L398, CONFIG_TEMPLATE_PATH@L433, CONFIG_TEMPLATE_PATH@L461, TASK_NAME@L510, CONFIG_TEMPLATE_PATH@L525, TASK_NAME@L533, CONFIG_TEMPLATE_PATH@L546, TASK_NAME@L556, CONFIG_TEMPLATE_PATH@L569]
name: kubernetes
scheduler:
principal: kubernetes
pods:
etcd:
count: 3
allow-decommission: true
placement: hostname:UNIQUE
uris:
- https://storage.googleapis.com/etcd/v3.2.9/etcd-v3.2.9-linux-amd64.tar.gz
resource-sets:
etcd:
cpus: 0.5
memory: 1024
ports:
peer:
port: 2380
env-key: ETCD_LISTEN_PEER_PORT
vip:
prefix: etcd-peer
port: 2380
client:
port: 2379
env-key: ETCD_LISTEN_CLIENT_PORT
vip:
prefix: etcd
port: 2379
volumes:
data:
path: "data-dir"
type: ROOT
size: 3072
wal:
path: "wal-pv"
type: ROOT
size: 512
tasks:
peer:
goal: RUNNING
kill-grace-period: 30
resource-set: etcd
# WARNING do not disable v2 API below with --enable-v2=false
# otherwise the healthcheck will fail.
#
# TODO (@pires)
# add the line below after https://github.com/coreos/etcd/issues/8268 is fixed and released
#--peer-client-cert-auth
cmd: |
printf "\n\n ###### Starting etcd -- ${TASK_NAME} ###### \n"
# In a graceful shutdown, we remove the peer from the cluster
terminated () {
printf "Removing member etcd-$POD_INSTANCE_INDEX-peer \n"
DEAD_PEER_ID=$(
./etcd-v3.2.9-linux-amd64/etcdctl \
--cert-file etcd-crt.pem \
--key-file etcd-key.pem \
--ca-file ca-crt.pem \
--endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk "/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\":\", \"\", \$1); print \$1 }" \
)
printf $DEAD_PEER_ID
./etcd-v3.2.9-linux-amd64/etcdctl \
--cert-file etcd-crt.pem \
--key-file etcd-key.pem \
--ca-file ca-crt.pem \
--endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID
rm -rf data-dir/member
printf "Member etcd-$POD_INSTANCE_INDEX-peer removed!\n"
exit 0
}
trap terminated SIGTERM EXIT
echo "Trapping etcd SIGTERM and EXIT!"
./etcd-v3.2.9-linux-amd64/etcd \
--name=infra$POD_INSTANCE_INDEX \
--cert-file=etcd-crt.pem \
--key-file=etcd-key.pem \
--client-cert-auth \
--trusted-ca-file=ca-crt.pem \
--peer-cert-file=etcd-crt.pem \
--peer-key-file=etcd-key.pem \
--peer-trusted-ca-file=ca-crt.pem \
--listen-peer-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_PEER_PORT \
--initial-advertise-peer-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT \
--listen-client-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_CLIENT_PORT \
--advertise-client-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT \
--log-output=stdout \
--quota-backend-bytes=3221225472 \
--election-timeout=5000 \
--heartbeat-interval=250 \
--initial-cluster infra0=https://etcd-0-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra1=https://etcd-1-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra2=https://etcd-2-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT
env:
ETCD_VERSION: v3.2.9
ETCD_INITIAL_CLUSTER_TOKEN: kubernetes
ETCD_DATA_DIR: data-dir
ETCD_WAL_DIR: wal-pv/wal-dir
health-check:
cmd: >
HEALTHY_PEER=$(./etcd-v3.2.9-linux-amd64/etcdctl
--cert-file etcd-crt.pem
--key-file etcd-key.pem
--ca-file ca-crt.pem
--endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT
cluster-health | grep "etcd-${POD_INSTANCE_INDEX}-peer" | grep -c 'is healthy') && [ "$HEALTHY_PEER" -eq "1" ]
interval: 15
grace-period: 60
max-consecutive-failures: 4
delay: 0
timeout: 10
readiness-check:
cmd: >
HEALTHY_CLUSTER=$(./etcd-v3.2.9-linux-amd64/etcdctl
--cert-file etcd-crt.pem
--key-file etcd-key.pem
--ca-file ca-crt.pem
--endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT
cluster-health | grep '^cluster ' | grep -c 'cluster is healthy') && [ "$HEALTHY_CLUSTER" -eq "1" ]
interval: 30
delay: 0
timeout: 10
recover:
goal: FINISHED
# ----
# if this task is assigned the same resource-set as etcd-peer then the
# resource-set VIPs will point to it when it is RUNNING and DC/OS will
# assume etcd-peer is healthy when in fact it's not (no etcd is running,
# just the recovery task). Uncomment next line when KUB-124 is fixed:
# resource-set: etcd
# ----
# - determine dead peer from the new peer we're about to recover
# - remove dead peer from member list
# - add new peer to the member list
cmd: >
DEAD_PEER_ID=$(
./etcd-v3.2.9-linux-amd64/etcdctl \
--cert-file etcd-crt.pem \
--key-file etcd-key.pem \
--ca-file ca-crt.pem \
--endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk "/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\":\", \"\", \$1); print \$1 }" \
)
./etcd-v3.2.9-linux-amd64/etcdctl
--cert-file etcd-crt.pem
--key-file etcd-key.pem
--ca-file ca-crt.pem
--endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID
rm -rf data-dir/member
./etcd-v3.2.9-linux-amd64/etcdctl
--cert-file etcd-crt.pem
--key-file etcd-key.pem
--ca-file ca-crt.pem
--endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member add infra$POD_INSTANCE_INDEX https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT
cpus: 0.1
memory: 32
env:
ETCD_LISTEN_CLIENT_PORT: 2379
ETCD_LISTEN_PEER_PORT: 2380
ETCD_VERSION: v3.2.9
kube-apiserver:
count: 3
allow-decommission: true
placement: hostname:UNIQUE
uris:
- https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-apiserver
tasks:
instance:
goal: RUNNING
env:
KUBERNETES_VERSION: v1.7.11
cmd: >
chmod +x kube-apiserver
printf "\n\n ###### Starting Kube API SERVER -- ${TASK_NAME} ###### \n"
./kube-apiserver
--etcd-servers=https://etcd-0-peer.kubernetes.mesos:2379,https://etcd-1-peer.kubernetes.mesos:2379,https://etcd-2-peer.kubernetes.mesos:2379
--etcd-cafile=ca-crt.pem
--etcd-certfile=kube-apiserver-crt.pem
--etcd-keyfile=kube-apiserver-key.pem
--etcd-prefix="/registry/cluster-0"
--etcd-quorum-read
--bind-address=$MESOS_CONTAINER_IP
--insecure-bind-address=$MESOS_CONTAINER_IP
--insecure-port=9000
--secure-port=6443
--apiserver-count=3
--allow-privileged
--service-cluster-ip-range=10.100.0.0/16
--authorization-mode=AlwaysAllow
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
--runtime-config=batch/v2alpha1=true,admissionregistration.k8s.io/v1alpha1=true
--service-account-key-file=service-account-key.pem
--tls-ca-file=ca-crt.pem
--tls-cert-file=kube-apiserver-crt.pem
--tls-private-key-file=kube-apiserver-key.pem
--client-ca-file=ca-crt.pem
--target-ram-mb=1024 2>&1
cpus: 0.5
memory: 1024
ports:
apiserver:
port: 6443
env-key: KUBE_APISERVER_PORT
vip:
prefix: apiserver
port: 6443
apiserver_insecure:
port: 9000
env-key: KUBE_APISERVER_INSECURE_PORT
vip:
prefix: apiserver-insecure
port: 9000
health-check:
cmd: >
HTTP_CODE=$(/opt/mesosphere/bin/curl
--silent --output /dev/null --fail --write-out "%{http_code}"
--cert kube-apiserver-crt.pem
--key kube-apiserver-key.pem
--cacert ca-crt.pem
https://kube-apiserver-$POD_INSTANCE_INDEX-instance.kubernetes.mesos:6443/healthz)
&& [ "$HTTP_CODE" -eq "200" ]
interval: 15
grace-period: 30
max-consecutive-failures: 3
delay: 0
timeout: 10
kube-controller-manager:
count: 3
allow-decommission: true
placement: hostname:UNIQUE
uris:
- https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-controller-manager
tasks:
instance:
goal: RUNNING
cmd: >
./bootstrap --resolve=false 2>&1
chmod +x kube-controller-manager
printf "\n\n ###### Starting Kube CONTROLLER MANAGER -- ${TASK_NAME} ###### \n"
./kube-controller-manager
--address=$MESOS_CONTAINER_IP
--port=10252
--kubeconfig=kubeconfig.yaml
--root-ca-file=ca-crt.pem
--service-account-private-key-file=service-account-key.pem
--leader-elect 2>&1
env:
USERNAME: kube-controller-manager
KUBERNETES_VERSION: v1.7.11
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
cpus: 0.5
memory: 512
health-check:
cmd: >
HTTP_CODE=$(/opt/mesosphere/bin/curl
--silent --output /dev/null --fail --write-out "%{http_code}"
http://$MESOS_CONTAINER_IP:10252/healthz)
&& [ "$HTTP_CODE" -eq "200" ]
interval: 15
grace-period: 30
max-consecutive-failures: 3
delay: 0
timeout: 10
readiness-check:
cmd: >
HTTP_CODE=$(/opt/mesosphere/bin/curl
--silent --output /dev/null --fail --write-out "%{http_code}"
http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000)
&& [ "$HTTP_CODE" -eq "200" ]
interval: 5
delay: 0
timeout: 10
kube-scheduler:
count: 3
allow-decommission: true
placement: hostname:UNIQUE
uris:
- https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-scheduler
tasks:
instance:
goal: RUNNING
cmd: >
./bootstrap --resolve=false 2>&1
chmod +x kube-scheduler
printf "\n\n ###### Starting Kube SCHEDULER -- ${TASK_NAME} ###### \n"
./kube-scheduler
--address=$MESOS_CONTAINER_IP
--kubeconfig=kubeconfig.yaml
--leader-elect
--kube-api-burst=120
--kube-api-qps=80 2>&1
env:
USERNAME: kube-scheduler
KUBERNETES_VERSION: v1.7.11
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
cpus: 0.5
memory: 512
health-check:
cmd: >
HTTP_CODE=$(/opt/mesosphere/bin/curl
--silent --output /dev/null --fail --write-out "%{http_code}"
http://$MESOS_CONTAINER_IP:10251/healthz)
&& [ "$HTTP_CODE" -eq "200" ]
interval: 15
grace-period: 30
max-consecutive-failures: 3
delay: 0
timeout: 10
kube-node:
count: 1
allow-decommission: true
placement: hostname:UNIQUE
uris:
- https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
- https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz
- https://downloads.mesosphere.com/kubernetes/socat/socat.d-1.7.3.2-2.fc26.tar.gz
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-proxy
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubelet
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl
- https://infinity-artifacts.s3.amazonaws.com/autodelete7d/kubernetes/20171206-172217-Z5QGImH7dVzO3lwz/resource-container.gz
resource-sets:
kube-node-kubelet:
cpus: 3.0
memory: 4100
volumes:
var:
path: "var"
type: ROOT
size: 1024
tasks:
kube-proxy:
goal: RUNNING
cmd: >
./bootstrap --resolve=false 2>&1
chmod +x kube-proxy
printf "\n\n ###### Starting Kube PROXY -- ${TASK_NAME} ###### \n"
./kube-proxy
--hostname-override=kube-node-$POD_INSTANCE_INDEX-kube-proxy.kubernetes.mesos
--bind-address=127.0.0.1
--kubeconfig=kubeconfig.yaml
--resource-container=""
--healthz-port=0 2>&1
env:
PATH: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path"
USERNAME: kube-node
KUBERNETES_VERSION: v1.7.11
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
cpus: 0.1
memory: 512
kubelet:
goal: RUNNING
kill-grace-period: 5
resource-set: kube-node-kubelet
cmd: |
./bootstrap --resolve=false 2>&1
chmod +x kubelet-wrapper.sh
printf "\n\n ###### Starting Kubelet -- ${TASK_NAME} ###### \n"
./kubelet-wrapper.sh 2>&1
env:
PATH: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path"
USERNAME: kube-node
FRAMEWORK_NAME:
PAUSE_DOCKER_IMAGE: "gcr.io/google_containers/pause-amd64:3.0"
KUBE_ALLOCATABLE_CPUS: 2
KUBE_ALLOCATABLE_MEM: 2048
KUBE_RESERVED_CPUS: 1
KUBE_RESERVED_MEM: 2052
KUBERNETES_NODE_DOCKER_VERSION: 17.09.0
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
kubelet-wrapper.sh:
template: kubelet-wrapper.template.sh
dest: kubelet-wrapper.sh
health-check:
cmd: >
HTTP_CODE=$(/opt/mesosphere/bin/curl
--silent --output /dev/null --fail --write-out "%{http_code}"
http://$MESOS_CONTAINER_IP:10258/healthz)
&& [ "$HTTP_CODE" -eq "200" ]
interval: 15
grace-period: 30
max-consecutive-failures: 3
delay: 0
timeout: 10
decommission:
goal: FINISHED
cpus: 0.1
memory: 32
env:
USERNAME: kube-node
KUBERNETES_NODE_DOCKER_VERSION: 17.09.0
KUBERNETES_VERSION: v1.7.11
configs:
kubeconfig:
template: /kubeconfig.template.yaml
dest: kubeconfig.yaml
cmd: |
printf "Starting to decommission the node...\n"
./bootstrap --resolve=false 2>&1 ;
chmod +x kubectl ;
NODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep "kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos" | grep -c "Ready")
if [ "$IS_UPGRADE_PLAN" = "YES" ] && [ "$NODE_FOUND" -eq "1" ] ; then
printf "Starting to decommission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \n" ;
./kubectl --kubeconfig=kubeconfig.yaml drain kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;
else
printf "Ignored the decommission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!" ;
fi
commission:
goal: FINISHED
cpus: 0.1
memory: 32
env:
USERNAME: kube-node
KUBERNETES_NODE_DOCKER_VERSION: 17.09.0
KUBERNETES_VERSION: v1.7.11
configs:
kubeconfig:
template: /kubeconfig.template.yaml
dest: kubeconfig.yaml
cmd: |
printf "Starting to commission the node...\n"
./bootstrap --resolve=false 2>&1 ;
chmod +x kubectl ;
NODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep -c "kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos")
if [ "$IS_UPGRADE_PLAN" = "YES" ] && [ "$NODE_FOUND" -eq "1" ] ; then
printf "Starting to commission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \n" ;
./kubectl --kubeconfig=kubeconfig.yaml uncordon kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;
else
printf "Ignored the commission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!" ;
fi
recover:
goal: FINISHED
resource-set: kube-node-kubelet
cmd: |
./bootstrap --resolve=false 2>&1
chmod +x kubectl
./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos
env:
USERNAME: kube-node
KUBERNETES_VERSION: v1.7.11
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
mandatory-addons:
count: 1
uris:
- https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
- https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl
resource-sets:
addons:
cpus: 0.1
memory: 32
tasks:
kube-dns:
goal: FINISHED
cmd: |
./bootstrap --resolve=false 2>&1
TASK_NAME= ADDON=kube-dns bash ./install_addon.sh
env:
KUBERNETES_VERSION: v1.7.11
KUBEDNS_DOCKER_IMAGE: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
KUBEDNS_DNSMASQ_DOCKER_IMAGE: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
KUBEDNS_SIDECAR_DOCKER_IMAGE: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
USERNAME: mandatory-addons
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
kubedns:
template: kubedns.template.yaml
dest: kube-dns.yaml
install_addon.sh:
template: /install_addon.template.sh
dest: install_addon.sh
resource-set: addons
heapster:
goal: FINISHED
cmd: |
./bootstrap --resolve=false 2>&1
TASK_NAME= ADDON=heapster bash ./install_addon.sh
env:
KUBERNETES_VERSION: v1.7.11
HEAPSTER_DOCKER_IMAGE: gcr.io/google_containers/heapster-amd64:v1.4.3
USERNAME: mandatory-addons
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
heapster:
template: heapster.template.yaml
dest: heapster.yaml
install_addon.sh:
template: /install_addon.template.sh
dest: install_addon.sh
cpus: 0.1
memory: 32
resource-set: addons
dashboard:
goal: FINISHED
cmd: |
./bootstrap --resolve=false 2>&1
TASK_NAME= ADDON=dashboard bash ./install_addon.sh
env:
KUBERNETES_VERSION: v1.7.11
DASHBOARD_DOCKER_IMAGE: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
USERNAME: mandatory-addons
configs:
kubeconfig:
template: kubeconfig.template.yaml
dest: kubeconfig.yaml
dashboard:
template: dashboard.template.yaml
dest: dashboard.yaml
install_addon.sh:
template: /install_addon.template.sh
dest: install_addon.sh
resource-set: addons
plans:
update:
strategy: kubernetes-upgrade
phases:
etcd:
strategy: serial
pod: etcd
steps:
- default: [[peer]]
apiserver:
strategy: serial
pod: kube-apiserver
steps:
- default: [[instance]]
controller-manager:
strategy: serial
pod: kube-controller-manager
steps:
- default: [[instance]]
scheduler:
strategy: serial
pod: kube-scheduler
steps:
- default: [[instance]]
node:
strategy: serial
pod: kube-node
steps:
- default: [[decommission],[kube-proxy,kubelet],[commission]]
mandatory-addons:
strategy: serial
pod: mandatory-addons
steps:
- default: [[kube-dns],[heapster],[dashboard]]
deploy:
strategy: serial
phases:
etcd:
strategy: parallel
pod: etcd
steps:
- default: [[peer]]
apiserver:
strategy: parallel
pod: kube-apiserver
steps:
- default: [[instance]]
controller-manager:
strategy: parallel
pod: kube-controller-manager
steps:
- default: [[instance]]
scheduler:
strategy: parallel
pod: kube-scheduler
steps:
- default: [[instance]]
node:
strategy: parallel
pod: kube-node
steps:
- default: [[kube-proxy,kubelet]]
mandatory-addons:
strategy: serial
pod: mandatory-addons
steps:
- default: [[kube-dns],[heapster],[dashboard]]
replace:
strategy: serial
phases:
etcd:
strategy: serial
pod: etcd
steps:
- default: [[recover],[peer]]
kube-node:
strategy: serial
pod: kube-node
steps:
- default: [[recover],[kube-proxy,kubelet]]
INFO 2017-12-06 17:43:45,980 [main] org.hibernate.validator.internal.util.Version:<clinit>(30): HV000001: Hibernate Validator 5.3.2.Final
INFO 2017-12-06 17:43:47,244 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='etcd-.*'}}'
INFO 2017-12-06 17:43:48,126 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-apiserver-.*'}}'
INFO 2017-12-06 17:43:48,536 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-controller-manager-.*'}}'
INFO 2017-12-06 17:43:48,939 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-scheduler-.*'}}'
INFO 2017-12-06 17:43:49,924 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-node-.*'}}'
INFO 2017-12-06 17:43:51,044 [main] org.apache.curator.framework.imps.CuratorFrameworkImpl:start(234): Starting
INFO 2017-12-06 17:43:51,128 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
INFO 2017-12-06 17:43:51,128 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:host.name=vm-kb5q.c.massive-bliss-781.internal
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.version=1.8.0_144
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.vendor=Oracle Corporation
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.home=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/jre1.8.0_144
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.class.path=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/kubernetes.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/scheduler-master.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/executor-master.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mesos-1.4.0-rc1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/common-master.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-guava-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-jdk8-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-jsr310-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-dataformat-yaml-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-protobuf-0.9.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/annotations-3.0.1u2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-collections-3.2.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-io-2.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/antlr4-runtime-4.5.1-1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.ws.rs-api-2.0.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-framework-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-recipes-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-test-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/httpclient-4.5.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/fluent-hc-4.5.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-lang3-3.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/protobuf-java-format-1.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/json-20160212.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/diffutils-1.3.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-container-jetty-http-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe
3706877b/kubernetes-scheduler/lib/jersey-container-servlet-core-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-json-jackson-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-multipart-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mimepull-1.9.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-servlet-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hibernate-validator-5.3.2.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.el-api-2.2.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.el-2.2.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mesos-http-adapter-0.4.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/java-jwt-3.2.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/bcprov-jdk15on-1.57.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/bcpkix-jdk15on-1.57.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-core-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3
706877b/kubernetes-scheduler/lib/metrics-servlet-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-servlets-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_dropwizard-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_servlet-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics3-statsd-4.2.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/slf4j-api-1.7.25.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-core-2.8.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-slf4j-impl-2.8.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/compiler-0.9.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-codec-1.10.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/snakeyaml-1.15.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-annotations-2.6.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-client-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-schedul
er/lib/zookeeper-3.4.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javassist-3.18.1-GA.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-math-2.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/httpcore-4.4.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-logging-1.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.inject-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-continuation-9.1.1.v20140108.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-common-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-server-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-entity-filtering-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-jaxrs-base-2.5.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-jaxrs-json-provider-2.5.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-security-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler
/lib/validation-api-1.1.0.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jboss-logging-3.3.0.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/classmate-1.3.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/edu-umd-cs-findbugs-annotations-1.3.2-201002241900.nbm:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-over-slf4j-1.7.10.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jcl-over-slf4j-1.7.10.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/google-http-client-1.20.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-healthchecks-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-json-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-jvm-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/profiler-1.0.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_common-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kuberne
tes-scheduler/lib/metrics-statsd-common-4.2.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-api-2.8.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-1.2.16.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jline-0.9.94.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/netty-3.7.0.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.servlet-api-3.1.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.annotation-api-1.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-guava-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-api-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-locator-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/osgi-resource-locator-1.0.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-client-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-jaxb-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-module-jaxb-an
notations-2.5.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/annotations-1.3.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/joda-time-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-utils-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/aopalliance-repackaged-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.inject-1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/protobuf-java-3.3.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-databind-2.6.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/guava-18.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jcip-annotations-1.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-server-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-core-2.6.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jsr305-3.0.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-http-9.2.3.v20140905.jar:/var/lib/mesos/slave
/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-io-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-util-9.2.3.v20140905.jar
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.library.path=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/libmesos-bundle/lib:/opt/mesosphere/lib:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.io.tmpdir=/tmp
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.compiler=<NA>
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:os.name=Linux
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:os.arch=amd64
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:os.version=4.12.7-coreos
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:user.name=root
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:user.home=/root
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:user.dir=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b
INFO 2017-12-06 17:43:51,133 [main] org.apache.zookeeper.ZooKeeper:<init>(438): Initiating client connection, connectString=master.mesos:2181 sessionTimeout=60000 watcher=org.apache.curator.ConnectionState@6330987e
INFO 2017-12-06 17:43:51,324 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:logStartConnect(975): Opening socket connection to server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181. Will not attempt to authenticate using SASL (unknown error)
INFO 2017-12-06 17:43:51,332 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:primeConnection(852): Socket connection established to vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, initiating session
INFO 2017-12-06 17:43:51,340 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:onConnected(1235): Session establishment complete on server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, sessionid = 0x1602c8203b400fd, negotiated timeout = 40000
INFO 2017-12-06 17:43:51,344 [main-EventThread] org.apache.curator.framework.state.ConnectionStateManager:postState(228): State change: CONNECTED
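The session above (connectString=master.mesos:2181, sessionTimeout=60000, followed by the CONNECTED state change) is standard Apache Curator startup. A minimal, self-contained sketch that reproduces this sequence is below; only the connect string and session timeout come from the log, while the connection timeout and retry policy are assumptions, not the scheduler's actual settings.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class ZkConnectSketch {
    public static void main(String[] args) throws Exception {
        CuratorFramework client = CuratorFrameworkFactory.newClient(
                "master.mesos:2181",                   // connectString, as logged
                60000,                                 // session timeout ms, as logged
                15000,                                 // connection timeout ms (assumed)
                new ExponentialBackoffRetry(1000, 3)); // retry policy (assumed)
        client.start();                                // emits the ZooKeeper <init> / connect lines
        client.blockUntilConnected();                  // returns once "State change: CONNECTED" fires
        client.close();
    }
}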
INFO 2017-12-06 17:43:52,830 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.SecretsClientTLSStore:cleanupTempSecrets(145): Deleting leftover temporary secrets.
INFO 2017-12-06 17:43:53,550 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.SecretsClientTLSStore:cleanupTempSecrets(165): Deleted leftover temporary secrets.
INFO 2017-12-06 17:43:54,136 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.TLSProvisioner:initializeServiceAccountKeyPair(69): Loaded valid service account keypair data.
INFO 2017-12-06 17:43:58,828 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.TLSProvisioner:initialize(249): TLS provisioning is finished.
INFO 2017-12-06 17:43:58,829 [main] com.mesosphere.sdk.scheduler.SchedulerRunner:<init>(84): Build information:
- kubernetes: stub-universe, built 2017-12-06T17:22:21.321Z
- SDK: master/2e7ada1-dirty, built 2017-11-28T17:16:08.166Z
INFO 2017-12-06 17:43:58,843 [main] org.apache.curator.framework.imps.CuratorFrameworkImpl:start(234): Starting
INFO 2017-12-06 17:43:58,920 [main] org.apache.zookeeper.ZooKeeper:<init>(438): Initiating client connection, connectString=master.mesos:2181 sessionTimeout=60000 watcher=org.apache.curator.ConnectionState@2d63dd21
INFO 2017-12-06 17:43:58,923 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:logStartConnect(975): Opening socket connection to server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181. Will not attempt to authenticate using SASL (unknown error)
INFO 2017-12-06 17:43:58,924 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:primeConnection(852): Socket connection established to vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, initiating session
INFO 2017-12-06 17:43:58,927 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:onConnected(1235): Session establishment complete on server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, sessionid = 0x1602c8203b400fe, negotiated timeout = 40000
INFO 2017-12-06 17:43:58,928 [main] com.mesosphere.sdk.curator.CuratorUtils:lock(52): Acquiring ZK lock on /dcos-service-kubernetes/lock...
INFO 2017-12-06 17:43:58,928 [main-EventThread] org.apache.curator.framework.state.ConnectionStateManager:postState(228): State change: CONNECTED
INFO 2017-12-06 17:43:59,025 [main] com.mesosphere.sdk.curator.CuratorUtils:lock(60): 1/3 Lock acquired.
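The lock node layout that appears in the state dump below (lock with leases and locks children, plus a _c_...-lease-... znode) matches what Curator's InterProcessSemaphoreMutex recipe creates, so the acquisition step logged above can be sketched roughly as follows. CuratorUtils itself is SDK code and may differ; the attempt count mirrors the "1/3" counter in the log, and the per-attempt wait is an assumption.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
import java.util.concurrent.TimeUnit;

public class ServiceLockSketch {
    private static final int MAX_ATTEMPTS = 3; // matches the "1/3" counter in the log

    static InterProcessSemaphoreMutex acquire(CuratorFramework client) throws Exception {
        InterProcessSemaphoreMutex lock =
                new InterProcessSemaphoreMutex(client, "/dcos-service-kubernetes/lock");
        for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++) {
            // Bounded wait per attempt (10 seconds here is an assumption).
            if (lock.acquire(10, TimeUnit.SECONDS)) {
                System.out.printf("%d/%d Lock acquired.%n", attempt, MAX_ATTEMPTS);
                return lock;
            }
        }
        throw new IllegalStateException("Could not acquire ZK lock for the service");
    }
}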
INFO 2017-12-06 17:43:59,435 [main] com.mesosphere.sdk.storage.PersisterCache:getCache(149): Loaded data from persister:
ROOT: NULL
ConfigTarget: 36 bytes
Configurations: 0 bytes
dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5: 64287 bytes
FrameworkID: 43 bytes
Properties: 0 bytes
etcd-0-peer:task-status: 286 bytes
etcd-1-peer:task-status: 286 bytes
etcd-2-peer:task-status: 286 bytes
kube-apiserver-0-instance:task-status: 302 bytes
kube-apiserver-1-instance:task-status: 302 bytes
kube-apiserver-2-instance:task-status: 302 bytes
kube-controller-manager-0-instance:task-status: 328 bytes
kube-controller-manager-1-instance:task-status: 328 bytes
kube-controller-manager-2-instance:task-status: 328 bytes
kube-node-0-kube-proxy:task-status: 290 bytes
kube-node-0-kubelet:task-status: 291 bytes
kube-node-1-kube-proxy:task-status: 290 bytes
kube-node-1-kubelet:task-status: 291 bytes
kube-scheduler-0-instance:task-status: 302 bytes
kube-scheduler-1-instance:task-status: 302 bytes
kube-scheduler-2-instance:task-status: 302 bytes
mandatory-addons-0-dashboard:task-status: 303 bytes
mandatory-addons-0-heapster:task-status: 302 bytes
mandatory-addons-0-kube-dns:task-status: 302 bytes
SchemaVersion: 1 byte
Tasks: 10 bytes
etcd-0-peer: 10 bytes
TaskInfo: 6407 bytes
TaskStatus: 286 bytes
etcd-0-recover: 10 bytes
TaskInfo: 3098 bytes
etcd-1-peer: 10 bytes
TaskInfo: 6407 bytes
TaskStatus: 286 bytes
etcd-1-recover: 10 bytes
TaskInfo: 3098 bytes
etcd-2-peer: 10 bytes
TaskInfo: 6407 bytes
TaskStatus: 286 bytes
etcd-2-recover: 10 bytes
TaskInfo: 3098 bytes
kube-apiserver-0-instance: 10 bytes
TaskInfo: 4597 bytes
TaskStatus: 302 bytes
kube-apiserver-1-instance: 10 bytes
TaskInfo: 4597 bytes
TaskStatus: 302 bytes
kube-apiserver-2-instance: 10 bytes
TaskInfo: 4597 bytes
TaskStatus: 302 bytes
kube-controller-manager-0-instance: 10 bytes
TaskInfo: 4320 bytes
TaskStatus: 328 bytes
kube-controller-manager-1-instance: 10 bytes
TaskInfo: 4320 bytes
TaskStatus: 328 bytes
kube-controller-manager-2-instance: 10 bytes
TaskInfo: 4320 bytes
TaskStatus: 328 bytes
kube-node-0-commission: 10 bytes
TaskInfo: 3648 bytes
kube-node-0-decommission: 10 bytes
TaskInfo: 3674 bytes
kube-node-0-kube-proxy: 10 bytes
TaskInfo: 3369 bytes
TaskStatus: 290 bytes
kube-node-0-kubelet: 10 bytes
TaskInfo: 4560 bytes
TaskStatus: 291 bytes
kube-node-1-commission: 10 bytes
TaskInfo: 3648 bytes
kube-node-1-decommission: 10 bytes
TaskInfo: 3674 bytes
kube-node-1-kube-proxy: 10 bytes
TaskInfo: 3369 bytes
TaskStatus: 290 bytes
kube-node-1-kubelet: 10 bytes
TaskInfo: 4560 bytes
TaskStatus: 291 bytes
kube-scheduler-0-instance: 10 bytes
TaskInfo: 3227 bytes
TaskStatus: 302 bytes
kube-scheduler-1-instance: 10 bytes
TaskInfo: 3227 bytes
TaskStatus: 302 bytes
kube-scheduler-2-instance: 10 bytes
TaskInfo: 3227 bytes
TaskStatus: 302 bytes
mandatory-addons-0-dashboard: 10 bytes
TaskInfo: 3078 bytes
TaskStatus: 314 bytes
mandatory-addons-0-heapster: 10 bytes
TaskInfo: 3052 bytes
TaskStatus: 313 bytes
mandatory-addons-0-kube-dns: 10 bytes
TaskInfo: 3235 bytes
TaskStatus: 313 bytes
lock: 0 bytes
leases: 0 bytes
_c_e652ace0-f273-4d97-812b-4c4385b43537-lease-0000000013: 10 bytes
locks: 0 bytes
servicename: 10 bytes
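The indented dump above is a recursive walk of the service's ZooKeeper subtree, printing each node name and the size of its payload. A hedged, self-contained sketch of that walk (the starting path and exact formatting are assumptions, not the PersisterCache implementation):

import org.apache.curator.framework.CuratorFramework;

public class PersisterDumpSketch {
    // Recursively print "<name>: <n> bytes" for every node under the given path.
    static void dump(CuratorFramework client, String path, String indent) throws Exception {
        byte[] data = client.getData().forPath(path);
        int size = (data == null) ? 0 : data.length;
        String name = path.substring(path.lastIndexOf('/') + 1);
        System.out.println(indent + name + ": " + size + " bytes");
        for (String child : client.getChildren().forPath(path)) {
            dump(client, path + "/" + child, indent + "  ");
        }
    }
}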
WARN 2017-12-06 17:43:59,721 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus
WARN 2017-12-06 17:43:59,721 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task etcd-0-recover: task.taskId=value: ""
, no status
WARN 2017-12-06 17:43:59,929 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
WARN 2017-12-06 17:43:59,929 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task etcd-1-recover: task.taskId=value: ""
, no status
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task etcd-2-recover: task.taskId=value: ""
, no status
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-0-commission: task.taskId=value: ""
, no status
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-0-decommission: task.taskId=value: ""
, no status
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-1-commission: task.taskId=value: ""
, no status
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-1-decommission: task.taskId=value: ""
, no status
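The repeated warning pairs above come from a startup consistency pass: tasks such as etcd-*-recover and kube-node-*-commission/decommission have a stored TaskInfo but no TaskStatus, so the store flags them and resets their task IDs rather than wait for status updates that will never arrive. A sketch of that kind of check, using illustrative types rather than the SDK's real signatures:

import java.util.Optional;

public class RepairTaskIdsSketch {
    static class StoredTask {
        final String name;
        String taskId;                 // "" means no live Mesos task ID
        final Optional<byte[]> status; // empty => "no status" in the log

        StoredTask(String name, String taskId, Optional<byte[]> status) {
            this.name = name;
            this.taskId = taskId;
            this.status = status;
        }
    }

    // Flag tasks whose stored TaskInfo has no matching TaskStatus and clear the ID.
    static void repairTaskIDs(Iterable<StoredTask> tasks) {
        for (StoredTask task : tasks) {
            if (!task.status.isPresent()) {
                System.out.printf(
                        "Found StateStore status inconsistency for task %s: task.taskId=\"%s\", no status%n",
                        task.name, task.taskId);
                task.taskId = ""; // treat the task as not-yet-launched going forward
            }
        }
    }
}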
INFO 2017-12-06 17:44:01,924 [main] com.mesosphere.sdk.state.ConfigStore:fetch(100): Fetching configuration with ID=dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5 from Configurations/dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5
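Per the line above, the target configuration is fetched by UUID from the Configurations node seen in the dump (ConfigTarget holds the 36-byte UUID, Configurations/<uuid> the 64 KB serialized spec). A rough sketch, with the root path inferred from the lock path and therefore an assumption:

import org.apache.curator.framework.CuratorFramework;
import java.nio.charset.StandardCharsets;

public class ConfigFetchSketch {
    static byte[] fetchTargetConfig(CuratorFramework client) throws Exception {
        // Read the target UUID, then the serialized configuration it points at.
        String root = "/dcos-service-kubernetes"; // assumed service root
        String id = new String(
                client.getData().forPath(root + "/ConfigTarget"), StandardCharsets.UTF_8);
        return client.getData().forPath(root + "/Configurations/" + id);
    }
}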
INFO 2017-12-06 17:44:03,130 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [recover]
INFO 2017-12-06 17:44:03,139 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,142 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus
INFO 2017-12-06 17:44:03,221 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-0-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,221 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,225 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer]
INFO 2017-12-06 17:44:03,226 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,227 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,227 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,228 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [recover]
INFO 2017-12-06 17:44:03,228 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
INFO 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-1-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer]
INFO 2017-12-06 17:44:03,231 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,231 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,231 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,232 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [recover]
INFO 2017-12-06 17:44:03,232 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,234 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
INFO 2017-12-06 17:44:03,235 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-2-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,235 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,235 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer]
INFO 2017-12-06 17:44:03,237 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,238 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,238 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [recover]
WARN 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.state.StateStore:fetchTask(290): No TaskInfo found for the requested name: kube-node-0-recover at: Tasks/kube-node-0-recover/TaskInfo
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:03,242 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [recover]
WARN 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.state.StateStore:fetchTask(290): No TaskInfo found for the requested name: kube-node-1-recover at: Tasks/kube-node-1-recover/TaskInfo
INFO 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:03,245 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,245 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,246 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,246 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,247 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer]
INFO 2017-12-06 17:44:03,248 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,248 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,248 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,249 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer]
INFO 2017-12-06 17:44:03,249 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,249 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,250 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,250 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer]
INFO 2017-12-06 17:44:03,321 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,322 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,322 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,322 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-0, with tasks: [instance]
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-1, with tasks: [instance]
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-2, with tasks: [instance]
INFO 2017-12-06 17:44:03,325 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,325 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,326 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,326 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-0, with tasks: [instance]
INFO 2017-12-06 17:44:03,326 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-1, with tasks: [instance]
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,328 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,328 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,328 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-2, with tasks: [instance]
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-0, with tasks: [instance]
INFO 2017-12-06 17:44:03,330 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,330 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,330 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-1, with tasks: [instance]
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,332 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-2, with tasks: [instance]
INFO 2017-12-06 17:44:03,332 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,332 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [decommission]
INFO 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
INFO 2017-12-06 17:44:03,334 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-0-decommission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,334 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,334 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:03,335 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,335 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,335 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,336 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,336 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [commission]
INFO 2017-12-06 17:44:03,336 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
INFO 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-0-commission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [decommission]
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[decommission]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-1-decommission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[decommission]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:03,339 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,339 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [commission]
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[commission]: changed status from: PENDING to: PENDING (interrupted=false)
WARN 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
INFO 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-1-commission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false
INFO 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[commission]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [kube-dns]
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-kube-dns' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [heapster]
INFO 2017-12-06 17:44:03,343 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,420 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:03,421 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-heapster' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,421 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,421 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [dashboard]
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-dashboard' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,424 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer]
INFO 2017-12-06 17:44:03,425 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer]
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer]
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,428 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,428 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,429 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-0, with tasks: [instance]
INFO 2017-12-06 17:44:03,429 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-1, with tasks: [instance]
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,431 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,431 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,431 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-2, with tasks: [instance]
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-0, with tasks: [instance]
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-1, with tasks: [instance]
INFO 2017-12-06 17:44:03,434 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,435 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,435 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,435 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-2, with tasks: [instance]
INFO 2017-12-06 17:44:03,436 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,436 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,436 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,437 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-0, with tasks: [instance]
INFO 2017-12-06 17:44:03,437 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,438 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,438 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,438 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-1, with tasks: [instance]
INFO 2017-12-06 17:44:03,439 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,440 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,440 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,440 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-2, with tasks: [instance]
INFO 2017-12-06 17:44:03,441 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,441 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,441 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,442 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:03,521 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:03,523 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,523 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [kube-dns]
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-kube-dns' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [heapster]
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-heapster' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [dashboard]
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-dashboard' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:03,528 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:getPlans(375): Got 3 YAML plans: [replace, update, deploy]
INFO 2017-12-06 17:44:03,534 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:getDefaultScheduler(276): Marking deployment as having been previously completed
INFO 2017-12-06 17:44:03,539 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:updateConfig(461): Updating config with 8 validators...
INFO 2017-12-06 17:44:03,540 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(66): Loading current target configuration: dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5
INFO 2017-12-06 17:44:03,624 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(78): New prospective config:
{
"name" : "kubernetes",
"role" : "kubernetes-role",
"principal" : "kubernetes",
"web-url" : null,
"zookeeper" : "master.mesos:2181",
"pod-specs" : [ {
"type" : "etcd",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://storage.googleapis.com/etcd/v3.2.9/etcd-v3.2.9-linux-amd64.tar.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "peer",
"goal" : "RUNNING",
"resource-set" : {
"id" : "etcd",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 1024.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 2380,
"end" : 2380
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "ETCD_LISTEN_PEER_PORT",
"port-name" : "peer",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "etcd-peer",
"vip-port" : 2380,
"network-names" : [ ],
"name" : "ports"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 2379,
"end" : 2379
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "ETCD_LISTEN_CLIENT_PORT",
"port-name" : "client",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "etcd",
"vip-port" : 2379,
"network-names" : [ ],
"name" : "ports"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "printf \"\\n\\n ###### Starting etcd -- ${TASK_NAME} ###### \\n\"\n\n# In a graceful shutdown, we remove the peer from the cluster\nterminated () {\n\n printf \"Removing member etcd-$POD_INSTANCE_INDEX-peer \\n\"\n\n DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n )\n\n printf $DEAD_PEER_ID\n\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\n\n rm -rf data-dir/member\n\n printf \"Member etcd-$POD_INSTANCE_INDEX-peer removed!\\n\"\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\n\necho \"Trapping etcd SIGTERM and EXIT!\"\n\n./etcd-v3.2.9-linux-amd64/etcd \\\n--name=infra$POD_INSTANCE_INDEX \\\n--cert-file=etcd-crt.pem \\\n--key-file=etcd-key.pem \\\n--client-cert-auth \\\n--trusted-ca-file=ca-crt.pem \\\n--peer-cert-file=etcd-crt.pem \\\n--peer-key-file=etcd-key.pem \\\n--peer-trusted-ca-file=ca-crt.pem \\\n--listen-peer-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_PEER_PORT \\\n--initial-advertise-peer-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT \\\n--listen-client-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_CLIENT_PORT \\\n--advertise-client-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT \\\n--log-output=stdout \\\n--quota-backend-bytes=3221225472 \\\n--election-timeout=5000 \\\n--heartbeat-interval=250 \\\n--initial-cluster infra0=https://etcd-0-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra1=https://etcd-1-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra2=https://etcd-2-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n",
"environment" : {
"ETCD_DATA_DIR" : "data-dir",
"ETCD_INITIAL_CLUSTER_TOKEN" : "kubernetes",
"ETCD_VERSION" : "v3.2.9",
"ETCD_WAL_DIR" : "wal-pv/wal-dir"
}
},
"health-check-spec" : {
"command" : "HEALTHY_PEER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep \"etcd-${POD_INSTANCE_INDEX}-peer\" | grep -c 'is healthy') && [ \"$HEALTHY_PEER\" -eq \"1\" ]\n",
"max-consecutive-failures" : 4,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 60
},
"readiness-check-spec" : {
"command" : "HEALTHY_CLUSTER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep '^cluster ' | grep -c 'cluster is healthy') && [ \"$HEALTHY_CLUSTER\" -eq \"1\" ]\n",
"delay" : 0,
"interval" : 30,
"timeout" : 10
},
"config-files" : [ ],
"discovery-spec" : null,
"kill-grace-period" : 30,
"transport-encryption" : [ ]
}, {
"name" : "recover",
"goal" : "FINISHED",
"resource-set" : {
"id" : "recover-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n)\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\nrm -rf data-dir/member\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member add infra$POD_INSTANCE_INDEX https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n",
"environment" : {
"ETCD_LISTEN_CLIENT_PORT" : "2379",
"ETCD_LISTEN_PEER_PORT" : "2380",
"ETCD_VERSION" : "v3.2.9"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "etcd-.*"
}
},
"volumes" : [ {
"@type" : "DefaultVolumeSpec",
"type" : "ROOT",
"container-path" : "data-dir",
"name" : "disk",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 3072.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultVolumeSpec",
"type" : "ROOT",
"container-path" : "wal-pv",
"name" : "disk",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/etcd_chain__tmp",
"env-key" : null,
"file" : "etcd-crt.pem"
}, {
"secret" : "kubernetes/etcd_private_key__tmp",
"env-key" : null,
"file" : "etcd-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-apiserver",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-apiserver", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "instance",
"goal" : "RUNNING",
"resource-set" : {
"id" : "instance-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 1024.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 6443,
"end" : 6443
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "KUBE_APISERVER_PORT",
"port-name" : "apiserver",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "apiserver",
"vip-port" : 6443,
"network-names" : [ ],
"name" : "ports"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 9000,
"end" : 9000
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "KUBE_APISERVER_INSECURE_PORT",
"port-name" : "apiserver_insecure",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "apiserver-insecure",
"vip-port" : 9000,
"network-names" : [ ],
"name" : "ports"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "\nchmod +x kube-apiserver\nprintf \"\\n\\n ###### Starting Kube API SERVER -- ${TASK_NAME} ###### \\n\"\n./kube-apiserver --etcd-servers=https://etcd-0-peer.kubernetes.mesos:2379,https://etcd-1-peer.kubernetes.mesos:2379,https://etcd-2-peer.kubernetes.mesos:2379 --etcd-cafile=ca-crt.pem --etcd-certfile=kube-apiserver-crt.pem --etcd-keyfile=kube-apiserver-key.pem --etcd-prefix=\"/registry/cluster-0\" --etcd-quorum-read --bind-address=$MESOS_CONTAINER_IP --insecure-bind-address=$MESOS_CONTAINER_IP --insecure-port=9000 --secure-port=6443 --apiserver-count=3 --allow-privileged --service-cluster-ip-range=10.100.0.0/16 --authorization-mode=AlwaysAllow --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds --runtime-config=batch/v2alpha1=true,admissionregistration.k8s.io/v1alpha1=true --service-account-key-file=service-account-key.pem --tls-ca-file=ca-crt.pem --tls-cert-file=kube-apiserver-crt.pem --tls-private-key-file=kube-apiserver-key.pem --client-ca-file=ca-crt.pem --target-ram-mb=1024 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" --cert kube-apiserver-crt.pem --key kube-apiserver-key.pem --cacert ca-crt.pem https://kube-apiserver-$POD_INSTANCE_INDEX-instance.kubernetes.mesos:6443/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : null,
"config-files" : [ ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-apiserver-.*"
}
},
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/service_account_private_key__tmp",
"env-key" : null,
"file" : "service-account-key.pem"
}, {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_apiserver_chain__tmp",
"env-key" : null,
"file" : "kube-apiserver-crt.pem"
}, {
"secret" : "kubernetes/kube_apiserver_private_key__tmp",
"env-key" : null,
"file" : "kube-apiserver-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-controller-manager",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-controller-manager", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "instance",
"goal" : "RUNNING",
"resource-set" : {
"id" : "instance-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-controller-manager\nprintf \"\\n\\n ###### Starting Kube CONTROLLER MANAGER -- ${TASK_NAME} ###### \\n\"\n./kube-controller-manager --address=$MESOS_CONTAINER_IP --port=10252 --kubeconfig=kubeconfig.yaml --root-ca-file=ca-crt.pem --service-account-private-key-file=service-account-key.pem --leader-elect 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-controller-manager"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10252/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"delay" : 0,
"interval" : 5,
"timeout" : 10
},
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-controller-manager-.*"
}
},
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/service_account_private_key__tmp",
"env-key" : null,
"file" : "service-account-key.pem"
}, {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_controller_manager_chain__tmp",
"env-key" : null,
"file" : "kube-controller-manager-crt.pem"
}, {
"secret" : "kubernetes/kube_controller_manager_private_key__tmp",
"env-key" : null,
"file" : "kube-controller-manager-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-scheduler",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-scheduler", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "instance",
"goal" : "RUNNING",
"resource-set" : {
"id" : "instance-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-scheduler\nprintf \"\\n\\n ###### Starting Kube SCHEDULER -- ${TASK_NAME} ###### \\n\"\n./kube-scheduler --address=$MESOS_CONTAINER_IP --kubeconfig=kubeconfig.yaml --leader-elect --kube-api-burst=120 --kube-api-qps=80 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-scheduler"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10251/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-scheduler-.*"
}
},
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_scheduler_chain__tmp",
"env-key" : null,
"file" : "kube-scheduler-crt.pem"
}, {
"secret" : "kubernetes/kube_scheduler_private_key__tmp",
"env-key" : null,
"file" : "kube-scheduler-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-node",
"user" : "root",
"count" : 1,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz", "https://downloads.mesosphere.com/kubernetes/socat/socat.d-1.7.3.2-2.fc26.tar.gz", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-proxy", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubelet", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://infinity-artifacts.s3.amazonaws.com/autodelete7d/kubernetes/20171206-172217-Z5QGImH7dVzO3lwz/resource-container.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "kube-proxy",
"goal" : "RUNNING",
"resource-set" : {
"id" : "kube-proxy-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-proxy\nprintf \"\\n\\n ###### Starting Kube PROXY -- ${TASK_NAME} ###### \\n\"\n./kube-proxy --hostname-override=kube-node-$POD_INSTANCE_INDEX-kube-proxy.kubernetes.mesos --bind-address=127.0.0.1 --kubeconfig=kubeconfig.yaml --resource-container=\"\" --healthz-port=0 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path",
"USERNAME" : "kube-node"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
}, {
"name" : "kubelet",
"goal" : "RUNNING",
"resource-set" : {
"id" : "kube-node-kubelet",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 3.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 4100.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\n\nchmod +x kubelet-wrapper.sh\n\nprintf \"\\n\\n ###### Starting Kubelet -- ${TASK_NAME} ###### \\n\"\n\n./kubelet-wrapper.sh 2>&1\n",
"environment" : {
"FRAMEWORK_NAME" : "",
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0",
"KUBE_ALLOCATABLE_CPUS" : "2",
"KUBE_ALLOCATABLE_MEM" : "2048",
"KUBE_RESERVED_CPUS" : "1",
"KUBE_RESERVED_MEM" : "2052",
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path",
"PAUSE_DOCKER_IMAGE" : "gcr.io/google_containers/pause-amd64:3.0",
"USERNAME" : "kube-node"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10258/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
}, {
"name" : "kubelet-wrapper.sh",
"relative-path" : "kubelet-wrapper.sh",
"template-content" : "#!/bin/bash\n\nset -e\n\n# In a graceful shutdown, we delete the node from the cluster\nterminated () {\n chmod +x kubectl\n\n printf \"Deleting node kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos \\n\"\n ./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\necho \"Trapping SIGTERM and EXIT!\"\n\nprintf \"Configuring task environment...\\n\"\n\n[ -z \"$KUBE_ALLOCATABLE_CPUS\" ] && (printf \"Error: KUBE_ALLOCATABLE_CPUS not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_ALLOCATABLE_MEM\" ] && (printf \"Error: KUBELET_ALLOCATABLE_MEM not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_RESERVED_CPUS\" ] && (printf \"Error: KUBE_RESERVED_CPUS not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_RESERVED_MEM\" ] && (printf \"Error: KUBELET_RESERVED_MEM not set or empty!\" >&2 ; exit 1)\n\n# use the DC/OS proxy for dockerd and the kubelet\n# reading this file from /opt/mesosphere is a hack. Ideally these vars would be injected by mesos\nset -o allexport\nsource /opt/mesosphere/etc/proxy.env\nset +o allexport\n\n# The kubelet sees all of the host resources.\n# To override the resources it will advertise, we set the allocatable resources\n# as follows:\n# - reserved_cpu:\n# read total cpu available\n# subtract amount configured by user\n# convert to millicores format expected by kubelet\n# - reserved_mem is calculated as follows:\n# read total system memory in Kb\n# subtract memory the user configured (in Mb)\n#\nSYSTEM_RESERVED_CPUS=$(lscpu | awk -v requested=$KUBE_ALLOCATABLE_CPUS -v reserved=$KUBE_RESERVED_CPUS '/^CPU\\(s\\)/ {print ($NF - requested - reserved) * 1000}')\nSYSTEM_RESERVED_MEM=$(awk -v requested=$KUBE_ALLOCATABLE_MEM -v reserved=$KUBE_RESERVED_MEM '/MemTotal/ {print int(($2 - requested * 1000 - reserved * 1000))}' /proc/meminfo)\nCGROUP_ROOT=$(grep memory /proc/self/cgroup | cut -d: -f3)\n\n[ -z \"$CGROUP_ROOT\" ] && (printf \"Error: Unable to find CGROUP_ROOT!\" >&2 ; exit 1)\n\n# Docker must run with a few special arguments.\n# data-root is important, because it seems /var/lib/docker\n# mount bind we will be doing below doesn't work (at least)\n# on CentOS 7. 
FYI, it works fine (at least) on CoreOS.\nDOCKER_ARGS=(\n --iptables=false\n --ip-masq=false\n --cgroup-parent=${CGROUP_ROOT}\n --data-root=var/new/lib/docker\n)\n\n# For now, we enforce Docker storage driver to overlay2.\nDOCKER_ARGS+=(\n --storage-driver=overlay2\n --storage-opt=\"overlay2.override_kernel_check=true\"\n)\n\n# Before running the kubelet, we need to make sure it supports\n# the hairpin mode.\necho 1 > /proc/sys/net/bridge/bridge-nf-call-iptables\n\n# We need to convert this to millicores\nKUBE_RESERVED_CPUS_M=$((${KUBE_RESERVED_CPUS} * 1000))\nKUBE_RESERVED_MEM_M=$((${KUBE_RESERVED_MEM} * 1000))\n\n# Kubelet must run with a few special arguments.\n#\n# FRAMEWORK_NAME, KUBELET_CPUS and KUBELET_MEM are framework variables\n# set by the framework scheduler when processing the service spec.\nKUBELET_ARGS=(\n --address=$MESOS_CONTAINER_IP\n --hostname-override=kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos\n --node-ip=$MESOS_CONTAINER_IP\n --require-kubeconfig\n --allow-privileged\n --network-plugin=cni\n --cni-bin-dir=/opt/mesosphere/active/cni\n --cni-conf-dir=/opt/mesosphere/etc/dcos/network/cni\n --healthz-bind-address=$MESOS_CONTAINER_IP\n --healthz-port=10258\n --cluster-dns=10.100.0.10\n --cluster-domain=cluster.local\n --system-reserved=\"cpu=${SYSTEM_RESERVED_CPUS}m,memory=${SYSTEM_RESERVED_MEM}Ki\"\n --kube-reserved=\"cpu=${KUBE_RESERVED_CPUS_M}m,memory=${KUBE_RESERVED_MEM_M}Ki\"\n --cgroup-driver=cgroupfs\n --kube-api-qps=15\n --kube-api-burst=30\n --event-qps=15\n --event-burst=30\n --max-pods=100\n --cgroup-root=${CGROUP_ROOT}\n --pod-infra-container-image=$PAUSE_DOCKER_IMAGE\n --kubeconfig=kubeconfig.yaml\n --tls-cert-file=kube-node-crt.pem\n --tls-private-key-file=kube-node-key.pem\n --kube-reserved-cgroup=${CGROUP_ROOT}/podruntime\n)\n\nprintf \"Sandboxing...\\n\"\n\n# Since the persistent volume \"var\" may have been previously used by the same\n# task, we need to make sure it's empty before proceeding.\nrm -rf var/*\n\nDIRS=( containerd docker dockershim kubelet )\n\nfor DIR in \"${DIRS[@]}\"\ndo\n mkdir -p /var/lib/${DIR} var/new/lib/${DIR}\n mount --bind var/new/lib/${DIR} /var/lib/${DIR}\ndone\n\nmkdir -p var/new/log\nmount --bind var/new/log /var/log\n\n# Isolate docker daemon from any existing configuration in /etc/docker\nmkdir -p etc/docker/\nmount --bind etc/docker /etc/docker\n\n# Move mount /run to ./run and mount bind only the necessary paths (/run/dcos\n# and /run/mesos). This is done in order to present a clean /run to both dockerd\n# and the kubelet.\n\nmkdir -p run\nmount -n -t tmpfs tmpfs run\n\n# On CentOS it exits with a non 0 error code but move mount works anyway. 
?\\_(?)_/?\nmount -M /run/ run || true\n# Double-check if move mount worked.\n[ -d run/mesos ] || (printf \"Error: Mount move failed.\\n\" >&2 && exit 1)\n# Clean up /run before continuing and mount bind only what's necessary.\nrm -rf /run/*\n\nDIRS=( dcos mesos )\n\n# Ubuntu requires lxcfs\nif [ -d run/lxcfs ]; then\n DIRS+=( lxcfs )\nfi\n\nfor DIR in \"${DIRS[@]}\"\ndo\n mkdir /run/${DIR}\n mount --bind run/${DIR} /run/${DIR}\ndone\n\nprintf \"Configuring network...\\n\"\n\n# For now, we use DC/OS overlay CNI integration.\nCONFDIR=/opt/mesosphere/etc/dcos/network/cni\nif [ -f $CONFDIR/dcos.conf ]; then\n mv $CONFDIR/dcos.conf $CONFDIR/dcos.1.conf\n CNICONF='{\"name\":\"dcos\",'`cat ${CONFDIR}/dcos.1.conf | cut -d \"{\" -f3-5 | cut -d \"}\" -f1-4`\n echo ${CNICONF} > ${CONFDIR}/dcos.1.conf\nfi\n\n# socat is needed for kubectl port-forward.\ncat << EOF > socat.d/socat\n#! /bin/bash\nPATH=/usr/bin:/bin:/usr/sbin:/sbin:$(pwd)/socat.d/\nLD_LIBRARY_PATH=$(pwd)/socat.d/lib:$LD_LIBRARY_PATH exec $(pwd)/socat.d/bin/socat \"\\$@\"\nEOF\n\nchmod +x kubelet socat.d/socat\n\nprintf \"Starting docker...\\n\"\n\nPATH=$(pwd)/docker:$(pwd)/socat.d:$PATH ./docker/dockerd ${DOCKER_ARGS[@]} &\n\npid=$!\n\nprintf \"Setting up cgroups for docker pid=\"$pid\"\\n\"\n\n# Start dockerd and kubelet, add them to all cgroup subsystems available on the\n# system and wait for kubelet process to exit.\n\nchmod +x resource-container\n\n./resource-container --parent ${CGROUP_ROOT} --cgroup podruntime --cpus ${KUBE_RESERVED_CPUS} --mem ${KUBE_RESERVED_MEM} --pid $pid\n\nprintf \"Starting kubelet...\\n\"\n\nPATH=$(pwd)/docker:$(pwd)/socat.d:$PATH ./kubelet ${KUBELET_ARGS[@]} &\n\npid=$!\n\nprintf \"Setting up cgroups for kubelet pid=\"$pid\"\\n\"\n\n./resource-container --parent ${CGROUP_ROOT} --cgroup podruntime --cpus ${KUBE_RESERVED_CPUS} --mem ${KUBE_RESERVED_MEM} --pid $pid\n\nwait $pid\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 5,
"transport-encryption" : [ ]
}, {
"name" : "decommission",
"goal" : "FINISHED",
"resource-set" : {
"id" : "decommission-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "\nprintf \"Starting to decommission the node...\\n\"\n\n./bootstrap --resolve=false 2>&1 ;\n\nchmod +x kubectl ;\n\nNODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep \"kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\" | grep -c \"Ready\")\n\nif [ \"$IS_UPGRADE_PLAN\" = \"YES\" ] && [ \"$NODE_FOUND\" -eq \"1\" ] ; then\n printf \"Starting to decommission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \\n\" ;\n ./kubectl --kubeconfig=kubeconfig.yaml drain kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;\nelse\n printf \"Ignored the decommission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!\" ;\nfi\n",
"environment" : {
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0",
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-node"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
}, {
"name" : "commission",
"goal" : "FINISHED",
"resource-set" : {
"id" : "commission-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "\nprintf \"Starting to commission the node...\\n\"\n\n./bootstrap --resolve=false 2>&1 ;\n\nchmod +x kubectl ;\n\nNODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep -c \"kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\")\n\nif [ \"$IS_UPGRADE_PLAN\" = \"YES\" ] && [ \"$NODE_FOUND\" -eq \"1\" ] ; then\n printf \"Starting to commission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \\n\" ;\n ./kubectl --kubeconfig=kubeconfig.yaml uncordon kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;\nelse\n printf \"Ignored the commission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!\" ;\nfi\n",
"environment" : {
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0",
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-node"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
}, {
"name" : "recover",
"goal" : "FINISHED",
"resource-set" : {
"id" : "kube-node-kubelet",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 3.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 4100.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\n\nchmod +x kubectl\n\n./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-node"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-node-.*"
}
},
"volumes" : [ {
"@type" : "DefaultVolumeSpec",
"type" : "ROOT",
"container-path" : "var",
"name" : "disk",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 1024.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_node_chain__tmp",
"env-key" : null,
"file" : "kube-node-crt.pem"
}, {
"secret" : "kubernetes/kube_node_private_key__tmp",
"env-key" : null,
"file" : "kube-node-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "mandatory-addons",
"user" : "root",
"count" : 1,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "kube-dns",
"goal" : "FINISHED",
"resource-set" : {
"id" : "addons",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=kube-dns bash ./install_addon.sh\n",
"environment" : {
"KUBEDNS_DNSMASQ_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7",
"KUBEDNS_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7",
"KUBEDNS_SIDECAR_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7",
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "mandatory-addons"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
}, {
"name" : "kubedns",
"relative-path" : "kube-dns.yaml",
"template-content" : "# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n addonmanager.kubernetes.io/mode: EnsureExists\ndata:\n upstreamNameservers: |\n [\"198.51.100.1\"]\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"KubeDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: 10.100.0.10\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n---\n\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\nspec:\n # We initially require 3 replicas to handle HA during upgrade operations.\n replicas: 3\n strategy:\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n volumes:\n - name: kube-dns-config\n configMap:\n name: kube-dns\n optional: true\n containers:\n - name: kubedns\n image: {{KUBEDNS_DOCKER_IMAGE}}\n resources:\n # TODO: Set memory limits when we've profiled the container for large\n # clusters, then set request = limit to keep this container in\n # guaranteed class. 
Currently, this container falls into the\n # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n livenessProbe:\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n # we poll on pod startup for the Kubernetes master service and\n # only setup the /readiness HTTP server once that's available.\n initialDelaySeconds: 3\n timeoutSeconds: 5\n args:\n - --domain=cluster.local.\n - --dns-port=10053\n - --config-dir=/kube-dns-config\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n volumeMounts:\n - name: kube-dns-config\n mountPath: /kube-dns-config\n - name: dnsmasq\n image: {{KUBEDNS_DNSMASQ_DOCKER_IMAGE}}\n livenessProbe:\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --no-negcache\n - --log-facility=-\n - --server=/cluster.local/127.0.0.1#10053\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/ip6.arpa/127.0.0.1#10053\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - name: kube-dns-config\n mountPath: /etc/k8s/dns/dnsmasq-nanny\n - name: sidecar\n image: {{KUBEDNS_SIDECAR_DOCKER_IMAGE}}\n livenessProbe:\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n memory: 20Mi\n cpu: 10m\n dnsPolicy: Default # Don't use cluster DNS.\n #serviceAccountName: kube-dns\n"
}, {
"name" : "install_addon.sh",
"relative-path" : "install_addon.sh",
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
}, {
"name" : "heapster",
"goal" : "FINISHED",
"resource-set" : {
"id" : "addons",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=heapster bash ./install_addon.sh\n",
"environment" : {
"HEAPSTER_DOCKER_IMAGE" : "gcr.io/google_containers/heapster-amd64:v1.4.3",
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "mandatory-addons"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
}, {
"name" : "heapster",
"relative-path" : "heapster.yaml",
"template-content" : "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: heapster\n namespace: kube-system\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: heapster\n namespace: kube-system\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n task: monitoring\n k8s-app: heapster\n spec:\n serviceAccountName: heapster\n containers:\n - name: heapster\n image: {{HEAPSTER_DOCKER_IMAGE}}\n imagePullPolicy: IfNotPresent\n command:\n - /heapster\n - --source=kubernetes.summary_api:https://kubernetes.default\n---\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n task: monitoring\n # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)\n # If you are NOT using this as an addon, you should comment out this line.\n kubernetes.io/cluster-service: 'true'\n kubernetes.io/name: Heapster\n name: heapster\n namespace: kube-system\nspec:\n ports:\n - port: 80\n targetPort: 8082\n selector:\n k8s-app: heapster\n"
}, {
"name" : "install_addon.sh",
"relative-path" : "install_addon.sh",
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
}, {
"name" : "dashboard",
"goal" : "FINISHED",
"resource-set" : {
"id" : "addons",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=dashboard bash ./install_addon.sh\n",
"environment" : {
"DASHBOARD_DOCKER_IMAGE" : "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3",
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "mandatory-addons"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
}, {
"name" : "dashboard",
"relative-path" : "dashboard.yaml",
"template-content" : "kind: Deployment\napiVersion: apps/v1beta1\nmetadata:\n labels:\n k8s-app: kubernetes-dashboard\n name: kubernetes-dashboard\n namespace: kube-system\nspec:\n replicas: 1\n revisionHistoryLimit: 10\n selector:\n matchLabels:\n k8s-app: kubernetes-dashboard\n template:\n metadata:\n labels:\n k8s-app: kubernetes-dashboard\n spec:\n containers:\n - name: kubernetes-dashboard\n image: {{DASHBOARD_DOCKER_IMAGE}}\n ports:\n - containerPort: 9090\n protocol: TCP\n args:\n # Uncomment the following line to manually specify Kubernetes API server Host\n # If not specified, Dashboard will attempt to auto discover the API server and connect\n # to it. Uncomment only if the default does not work.\n # - --apiserver-host=https://kubernetes.default\n livenessProbe:\n httpGet:\n path: /\n port: 9090\n initialDelaySeconds: 30\n timeoutSeconds: 30\n---\n\nkind: Service\napiVersion: v1\nmetadata:\n labels:\n k8s-app: kubernetes-dashboard\n name: kubernetes-dashboard\n namespace: kube-system\nspec:\n ports:\n - port: 80\n targetPort: 9090\n selector:\n k8s-app: kubernetes-dashboard\n"
}, {
"name" : "install_addon.sh",
"relative-path" : "install_addon.sh",
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : null,
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_node_chain__tmp",
"env-key" : null,
"file" : "mandatory-addons-crt.pem"
}, {
"secret" : "kubernetes/kube_node_private_key__tmp",
"env-key" : null,
"file" : "mandatory-addons-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : false
} ],
"replacement-failure-policy" : null,
"user" : "root"
}
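Editor's note: the "decommission" and "commission" task specs in the config above implement the kube-node lifecycle during an upgrade plan: drain the kubelet's node before the task is replaced, then uncordon it once the new task is up. A minimal standalone sketch of that flow follows, assuming a local kubectl binary and the kubeconfig.yaml rendered by the tasks; POD_INSTANCE_INDEX and IS_UPGRADE_PLAN are injected by the scheduler in real runs, and the default used here is a placeholder. This is a sketch only, not the scheduler's actual entrypoint.

#!/bin/bash
# Sketch: mirrors the drain/uncordon cycle of the decommission and
# commission tasks logged above. Assumes ./kubectl and kubeconfig.yaml
# are present in the working directory, as they are in the task sandbox.
set -euo pipefail

POD_INSTANCE_INDEX="${POD_INSTANCE_INDEX:-0}"   # placeholder; injected by the SDK in real runs
NODE="kube-node-${POD_INSTANCE_INDEX}-kubelet.kubernetes.mesos"
KUBECTL="./kubectl --kubeconfig=kubeconfig.yaml"

# Only act when the node is registered and Ready, matching the tasks' guard.
if $KUBECTL get node --ignore-not-found "$NODE" | grep -q " Ready"; then
  $KUBECTL drain "$NODE"      # decommission: evict pods before replacing the kubelet
  # ... the upgrade plan replaces the kubelet task at this point ...
  $KUBECTL uncordon "$NODE"   # commission: make the node schedulable again
else
  echo "Node $NODE not found or not Ready; skipping drain/uncordon."
fi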
INFO 2017-12-06 17:44:04,225 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(92): Prior target config:
{
"name" : "kubernetes",
"role" : "kubernetes-role",
"principal" : "kubernetes",
"web-url" : null,
"zookeeper" : "master.mesos:2181",
"pod-specs" : [ {
"type" : "etcd",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://storage.googleapis.com/etcd/v3.2.9/etcd-v3.2.9-linux-amd64.tar.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "peer",
"goal" : "RUNNING",
"resource-set" : {
"id" : "etcd",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 1024.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 2380,
"end" : 2380
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "ETCD_LISTEN_PEER_PORT",
"port-name" : "peer",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "etcd-peer",
"vip-port" : 2380,
"network-names" : [ ],
"name" : "ports"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 2379,
"end" : 2379
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "ETCD_LISTEN_CLIENT_PORT",
"port-name" : "client",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "etcd",
"vip-port" : 2379,
"network-names" : [ ],
"name" : "ports"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "printf \"\\n\\n ###### Starting etcd -- ${TASK_NAME} ###### \\n\"\n\n# In a graceful shutdown, we remove the peer from the cluster\nterminated () {\n\n printf \"Removing member etcd-$POD_INSTANCE_INDEX-peer \\n\"\n\n DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n )\n\n printf $DEAD_PEER_ID\n\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\n\n rm -rf data-dir/member\n\n printf \"Member etcd-$POD_INSTANCE_INDEX-peer removed!\\n\"\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\n\necho \"Trapping etcd SIGTERM and EXIT!\"\n\n./etcd-v3.2.9-linux-amd64/etcd \\\n--name=infra$POD_INSTANCE_INDEX \\\n--cert-file=etcd-crt.pem \\\n--key-file=etcd-key.pem \\\n--client-cert-auth \\\n--trusted-ca-file=ca-crt.pem \\\n--peer-cert-file=etcd-crt.pem \\\n--peer-key-file=etcd-key.pem \\\n--peer-trusted-ca-file=ca-crt.pem \\\n--listen-peer-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_PEER_PORT \\\n--initial-advertise-peer-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT \\\n--listen-client-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_CLIENT_PORT \\\n--advertise-client-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT \\\n--log-output=stdout \\\n--quota-backend-bytes=3221225472 \\\n--election-timeout=5000 \\\n--heartbeat-interval=250 \\\n--initial-cluster infra0=https://etcd-0-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra1=https://etcd-1-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra2=https://etcd-2-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n",
"environment" : {
"ETCD_DATA_DIR" : "data-dir",
"ETCD_INITIAL_CLUSTER_TOKEN" : "kubernetes",
"ETCD_VERSION" : "v3.2.9",
"ETCD_WAL_DIR" : "wal-pv/wal-dir"
}
},
"health-check-spec" : {
"command" : "HEALTHY_PEER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep \"etcd-${POD_INSTANCE_INDEX}-peer\" | grep -c 'is healthy') && [ \"$HEALTHY_PEER\" -eq \"1\" ]\n",
"max-consecutive-failures" : 4,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 60
},
"readiness-check-spec" : {
"command" : "HEALTHY_CLUSTER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep '^cluster ' | grep -c 'cluster is healthy') && [ \"$HEALTHY_CLUSTER\" -eq \"1\" ]\n",
"delay" : 0,
"interval" : 30,
"timeout" : 10
},
"config-files" : [ ],
"discovery-spec" : null,
"kill-grace-period" : 30,
"transport-encryption" : [ ]
}, {
"name" : "recover",
"goal" : "FINISHED",
"resource-set" : {
"id" : "recover-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 32.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n)\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\nrm -rf data-dir/member\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member add infra$POD_INSTANCE_INDEX https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n",
"environment" : {
"ETCD_LISTEN_CLIENT_PORT" : "2379",
"ETCD_LISTEN_PEER_PORT" : "2380",
"ETCD_VERSION" : "v3.2.9"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "etcd-.*"
}
},
"volumes" : [ {
"@type" : "DefaultVolumeSpec",
"type" : "ROOT",
"container-path" : "data-dir",
"name" : "disk",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 3072.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultVolumeSpec",
"type" : "ROOT",
"container-path" : "wal-pv",
"name" : "disk",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/etcd_chain__tmp",
"env-key" : null,
"file" : "etcd-crt.pem"
}, {
"secret" : "kubernetes/etcd_private_key__tmp",
"env-key" : null,
"file" : "etcd-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-apiserver",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-apiserver", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "instance",
"goal" : "RUNNING",
"resource-set" : {
"id" : "instance-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 1024.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 6443,
"end" : 6443
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "KUBE_APISERVER_PORT",
"port-name" : "apiserver",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "apiserver",
"vip-port" : 6443,
"network-names" : [ ],
"name" : "ports"
}, {
"@type" : "NamedVIPSpec",
"value" : {
"type" : "RANGES",
"scalar" : null,
"ranges" : {
"range" : [ {
"begin" : 9000,
"end" : 9000
} ]
},
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes",
"env-key" : "KUBE_APISERVER_INSECURE_PORT",
"port-name" : "apiserver_insecure",
"protocol" : "tcp",
"visibility" : "CLUSTER",
"vip-name" : "apiserver-insecure",
"vip-port" : 9000,
"network-names" : [ ],
"name" : "ports"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "\nchmod +x kube-apiserver\nprintf \"\\n\\n ###### Starting Kube API SERVER -- ${TASK_NAME} ###### \\n\"\n./kube-apiserver --etcd-servers=https://etcd-0-peer.kubernetes.mesos:2379,https://etcd-1-peer.kubernetes.mesos:2379,https://etcd-2-peer.kubernetes.mesos:2379 --etcd-cafile=ca-crt.pem --etcd-certfile=kube-apiserver-crt.pem --etcd-keyfile=kube-apiserver-key.pem --etcd-prefix=\"/registry/cluster-0\" --etcd-quorum-read --bind-address=$MESOS_CONTAINER_IP --insecure-bind-address=$MESOS_CONTAINER_IP --insecure-port=9000 --secure-port=6443 --apiserver-count=3 --allow-privileged --service-cluster-ip-range=10.100.0.0/16 --authorization-mode=AlwaysAllow --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds --runtime-config=batch/v2alpha1=true,admissionregistration.k8s.io/v1alpha1=true --service-account-key-file=service-account-key.pem --tls-ca-file=ca-crt.pem --tls-cert-file=kube-apiserver-crt.pem --tls-private-key-file=kube-apiserver-key.pem --client-ca-file=ca-crt.pem --target-ram-mb=1024 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" --cert kube-apiserver-crt.pem --key kube-apiserver-key.pem --cacert ca-crt.pem https://kube-apiserver-$POD_INSTANCE_INDEX-instance.kubernetes.mesos:6443/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : null,
"config-files" : [ ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-apiserver-.*"
}
},
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/service_account_private_key__tmp",
"env-key" : null,
"file" : "service-account-key.pem"
}, {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_apiserver_chain__tmp",
"env-key" : null,
"file" : "kube-apiserver-crt.pem"
}, {
"secret" : "kubernetes/kube_apiserver_private_key__tmp",
"env-key" : null,
"file" : "kube-apiserver-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-controller-manager",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-controller-manager", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "instance",
"goal" : "RUNNING",
"resource-set" : {
"id" : "instance-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-controller-manager\nprintf \"\\n\\n ###### Starting Kube CONTROLLER MANAGER -- ${TASK_NAME} ###### \\n\"\n./kube-controller-manager --address=$MESOS_CONTAINER_IP --port=10252 --kubeconfig=kubeconfig.yaml --root-ca-file=ca-crt.pem --service-account-private-key-file=service-account-key.pem --leader-elect 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-controller-manager"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10252/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"delay" : 0,
"interval" : 5,
"timeout" : 10
},
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-controller-manager-.*"
}
},
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/service_account_private_key__tmp",
"env-key" : null,
"file" : "service-account-key.pem"
}, {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_controller_manager_chain__tmp",
"env-key" : null,
"file" : "kube-controller-manager-crt.pem"
}, {
"secret" : "kubernetes/kube_controller_manager_private_key__tmp",
"env-key" : null,
"file" : "kube-controller-manager-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-scheduler",
"user" : "root",
"count" : 3,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-scheduler", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "instance",
"goal" : "RUNNING",
"resource-set" : {
"id" : "instance-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.5
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-scheduler\nprintf \"\\n\\n ###### Starting Kube SCHEDULER -- ${TASK_NAME} ###### \\n\"\n./kube-scheduler --address=$MESOS_CONTAINER_IP --kubeconfig=kubeconfig.yaml --leader-elect --kube-api-burst=120 --kube-api-qps=80 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"USERNAME" : "kube-scheduler"
}
},
"health-check-spec" : {
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10251/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n",
"max-consecutive-failures" : 3,
"delay" : 0,
"interval" : 15,
"timeout" : 10,
"gracePeriod" : 30
},
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n"
} ],
"discovery-spec" : null,
"kill-grace-period" : 0,
"transport-encryption" : [ ]
} ],
"placement-rule" : {
"@type" : "MaxPerHostnameRule",
"max" : 1,
"task-filter" : {
"@type" : "RegexMatcher",
"pattern" : "kube-scheduler-.*"
}
},
"volumes" : [ ],
"pre-reserved-role" : "*",
"secrets" : [ {
"secret" : "kubernetes/ca_crt__tmp",
"env-key" : null,
"file" : "ca-crt.pem"
}, {
"secret" : "kubernetes/kube_scheduler_chain__tmp",
"env-key" : null,
"file" : "kube-scheduler-crt.pem"
}, {
"secret" : "kubernetes/kube_scheduler_private_key__tmp",
"env-key" : null,
"file" : "kube-scheduler-key.pem"
} ],
"share-pid-namespace" : false,
"allow-decommission" : true
}, {
"type" : "kube-node",
"user" : "root",
"count" : 2,
"image" : null,
"networks" : [ ],
"rlimits" : [ ],
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz", "https://downloads.mesosphere.com/kubernetes/socat/socat.d-1.7.3.2-2.fc26.tar.gz", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-proxy", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubelet", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://infinity-artifacts.s3.amazonaws.com/autodelete7d/kubernetes/20171206-172217-Z5QGImH7dVzO3lwz/resource-container.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ],
"task-specs" : [ {
"name" : "kube-proxy",
"goal" : "RUNNING",
"resource-set" : {
"id" : "kube-proxy-resource-set",
"resource-specifications" : [ {
"@type" : "DefaultResourceSpec",
"name" : "cpus",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 0.1
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
}, {
"@type" : "DefaultResourceSpec",
"name" : "mem",
"value" : {
"type" : "SCALAR",
"scalar" : {
"value" : 512.0
},
"ranges" : null,
"set" : null,
"text" : null
},
"role" : "kubernetes-role",
"pre-reserved-role" : "*",
"principal" : "kubernetes"
} ],
"volume-specifications" : [ ],
"role" : "kubernetes-role",
"principal" : "kubernetes"
},
"command-spec" : {
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-proxy\nprintf \"\\n\\n ###### Starting Kube PROXY -- ${TASK_NAME} ###### \\n\"\n./kube-proxy --hostname-override=kube-node-$POD_INSTANCE_INDEX-kube-proxy.kubernetes.mesos --bind-address=127.0.0.1 --kubeconfig=kubeconfig.yaml --resource-container=\"\" --healthz-port=0 2>&1\n",
"environment" : {
"KUBERNETES_VERSION" : "v1.7.11",
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path",
"USERNAME" : "kube-node"
}
},
"health-check-spec" : null,
"readiness-check-spec" : null,
"config-files" : [ {
"name" : "kubeconfig",
"relative-path" : "kubeconfig.yaml",