Archive: /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler.zip
(all entries below are relative to the run sandbox directory shown above)
  creating: kubernetes-scheduler/
 inflating: kubernetes-scheduler/dashboard.template.yaml
 inflating: kubernetes-scheduler/heapster.template.yaml
 inflating: kubernetes-scheduler/install_addon.template.sh
 inflating: kubernetes-scheduler/kubeconfig.template.yaml
 inflating: kubernetes-scheduler/kubedns.template.yaml
 inflating: kubernetes-scheduler/kubelet-wrapper.template.sh
 inflating: kubernetes-scheduler/svc.yml
  creating: kubernetes-scheduler/lib/
 inflating: kubernetes-scheduler/lib/kubernetes.jar
 inflating: kubernetes-scheduler/lib/scheduler-master.jar
 inflating: kubernetes-scheduler/lib/executor-master.jar
 inflating: kubernetes-scheduler/lib/mesos-1.4.0-rc1.jar
 inflating: kubernetes-scheduler/lib/common-master.jar
 inflating: kubernetes-scheduler/lib/jackson-datatype-guava-2.6.3.jar
 inflating: kubernetes-scheduler/lib/jackson-datatype-jdk8-2.6.3.jar
 inflating: kubernetes-scheduler/lib/jackson-datatype-jsr310-2.6.3.jar
 inflating: kubernetes-scheduler/lib/jackson-dataformat-yaml-2.6.3.jar
 inflating: kubernetes-scheduler/lib/jackson-datatype-protobuf-0.9.3.jar
 inflating: kubernetes-scheduler/lib/annotations-3.0.1u2.jar
 inflating: kubernetes-scheduler/lib/commons-collections-3.2.2.jar
 inflating: kubernetes-scheduler/lib/commons-io-2.4.jar
 inflating: kubernetes-scheduler/lib/antlr4-runtime-4.5.1-1.jar
 inflating: kubernetes-scheduler/lib/javax.ws.rs-api-2.0.1.jar
 inflating: kubernetes-scheduler/lib/curator-framework-2.9.1.jar
 inflating: kubernetes-scheduler/lib/curator-recipes-2.9.1.jar
 inflating: kubernetes-scheduler/lib/curator-test-2.9.1.jar
 inflating: kubernetes-scheduler/lib/httpclient-4.5.2.jar
 inflating: kubernetes-scheduler/lib/fluent-hc-4.5.2.jar
 inflating: kubernetes-scheduler/lib/commons-lang3-3.4.jar
 inflating: kubernetes-scheduler/lib/protobuf-java-format-1.4.jar
 inflating: kubernetes-scheduler/lib/json-20160212.jar
 inflating: kubernetes-scheduler/lib/diffutils-1.3.0.jar
 inflating: kubernetes-scheduler/lib/jersey-container-jetty-http-2.23.jar
 inflating: kubernetes-scheduler/lib/jersey-container-servlet-core-2.23.jar
 inflating: kubernetes-scheduler/lib/jersey-media-json-jackson-2.23.jar
 inflating: kubernetes-scheduler/lib/jersey-media-multipart-2.23.jar
 inflating: kubernetes-scheduler/lib/mimepull-1.9.6.jar
 inflating: kubernetes-scheduler/lib/jetty-servlet-9.2.3.v20140905.jar
 inflating: kubernetes-scheduler/lib/hibernate-validator-5.3.2.Final.jar
 inflating: kubernetes-scheduler/lib/javax.el-api-2.2.4.jar
 inflating: kubernetes-scheduler/lib/javax.el-2.2.4.jar
 inflating: kubernetes-scheduler/lib/mesos-http-adapter-0.4.1.jar
 inflating: kubernetes-scheduler/lib/java-jwt-3.2.0.jar
 inflating: kubernetes-scheduler/lib/bcprov-jdk15on-1.57.jar
 inflating: kubernetes-scheduler/lib/bcpkix-jdk15on-1.57.jar
 inflating: kubernetes-scheduler/lib/metrics-core-3.2.5.jar
 inflating: kubernetes-scheduler/lib/metrics-servlet-3.2.5.jar
 inflating: kubernetes-scheduler/lib/metrics-servlets-3.2.5.jar
 inflating: kubernetes-scheduler/lib/simpleclient_dropwizard-0.0.26.jar
 inflating: kubernetes-scheduler/lib/simpleclient_servlet-0.0.26.jar
 inflating: kubernetes-scheduler/lib/metrics3-statsd-4.2.0.jar
 inflating: kubernetes-scheduler/lib/slf4j-api-1.7.25.jar
 inflating: kubernetes-scheduler/lib/log4j-core-2.8.1.jar
 inflating: kubernetes-scheduler/lib/log4j-slf4j-impl-2.8.1.jar
 inflating: kubernetes-scheduler/lib/compiler-0.9.2.jar
 inflating: kubernetes-scheduler/lib/commons-codec-1.10.jar
 inflating: kubernetes-scheduler/lib/snakeyaml-1.15.jar
 inflating: kubernetes-scheduler/lib/jackson-annotations-2.6.0.jar
 inflating: kubernetes-scheduler/lib/curator-client-2.9.1.jar
 inflating: kubernetes-scheduler/lib/zookeeper-3.4.6.jar
 inflating: kubernetes-scheduler/lib/javassist-3.18.1-GA.jar
 inflating: kubernetes-scheduler/lib/commons-math-2.2.jar
 inflating: kubernetes-scheduler/lib/httpcore-4.4.4.jar
 inflating: kubernetes-scheduler/lib/commons-logging-1.2.jar
 inflating: kubernetes-scheduler/lib/javax.inject-2.4.0-b34.jar
 inflating: kubernetes-scheduler/lib/jetty-continuation-9.1.1.v20140108.jar
 inflating: kubernetes-scheduler/lib/jersey-common-2.23.jar
 inflating: kubernetes-scheduler/lib/jersey-server-2.23.jar
 inflating: kubernetes-scheduler/lib/jersey-entity-filtering-2.23.jar
 inflating: kubernetes-scheduler/lib/jackson-jaxrs-base-2.5.4.jar
 inflating: kubernetes-scheduler/lib/jackson-jaxrs-json-provider-2.5.4.jar
 inflating: kubernetes-scheduler/lib/jetty-security-9.2.3.v20140905.jar
 inflating: kubernetes-scheduler/lib/validation-api-1.1.0.Final.jar
 inflating: kubernetes-scheduler/lib/jboss-logging-3.3.0.Final.jar
 inflating: kubernetes-scheduler/lib/classmate-1.3.1.jar
 inflating: kubernetes-scheduler/lib/edu-umd-cs-findbugs-annotations-1.3.2-201002241900.nbm
 inflating: kubernetes-scheduler/lib/log4j-over-slf4j-1.7.10.jar
 inflating: kubernetes-scheduler/lib/jcl-over-slf4j-1.7.10.jar
 inflating: kubernetes-scheduler/lib/google-http-client-1.20.0.jar
 inflating: kubernetes-scheduler/lib/metrics-healthchecks-3.2.5.jar
 inflating: kubernetes-scheduler/lib/metrics-json-3.2.5.jar
 inflating: kubernetes-scheduler/lib/metrics-jvm-3.2.5.jar
 inflating: kubernetes-scheduler/lib/profiler-1.0.2.jar
 inflating: kubernetes-scheduler/lib/simpleclient-0.0.26.jar
 inflating: kubernetes-scheduler/lib/simpleclient_common-0.0.26.jar
 inflating: kubernetes-scheduler/lib/metrics-statsd-common-4.2.0.jar
 inflating: kubernetes-scheduler/lib/log4j-api-2.8.1.jar
 inflating: kubernetes-scheduler/lib/log4j-1.2.16.jar
 inflating: kubernetes-scheduler/lib/jline-0.9.94.jar
 inflating: kubernetes-scheduler/lib/netty-3.7.0.Final.jar
 inflating: kubernetes-scheduler/lib/javax.servlet-api-3.1.0.jar
 inflating: kubernetes-scheduler/lib/javax.annotation-api-1.2.jar
 inflating: kubernetes-scheduler/lib/jersey-guava-2.23.jar
 inflating: kubernetes-scheduler/lib/hk2-api-2.4.0-b34.jar
 inflating: kubernetes-scheduler/lib/hk2-locator-2.4.0-b34.jar
 inflating: kubernetes-scheduler/lib/osgi-resource-locator-1.0.1.jar
 inflating: kubernetes-scheduler/lib/jersey-client-2.23.jar
 inflating: kubernetes-scheduler/lib/jersey-media-jaxb-2.23.jar
 inflating: kubernetes-scheduler/lib/jackson-module-jaxb-annotations-2.5.4.jar
 inflating: kubernetes-scheduler/lib/annotations-1.3.2.jar
 inflating: kubernetes-scheduler/lib/joda-time-2.9.1.jar
 inflating: kubernetes-scheduler/lib/hk2-utils-2.4.0-b34.jar
 inflating: kubernetes-scheduler/lib/aopalliance-repackaged-2.4.0-b34.jar
 inflating: kubernetes-scheduler/lib/javax.inject-1.jar
 inflating: kubernetes-scheduler/lib/protobuf-java-3.3.1.jar
 inflating: kubernetes-scheduler/lib/jackson-databind-2.6.6.jar
 inflating: kubernetes-scheduler/lib/guava-18.0.jar
 inflating: kubernetes-scheduler/lib/jcip-annotations-1.0.jar
 inflating: kubernetes-scheduler/lib/jetty-server-9.2.3.v20140905.jar
 inflating: kubernetes-scheduler/lib/jackson-core-2.6.6.jar
 inflating: kubernetes-scheduler/lib/jsr305-3.0.1.jar
 inflating: kubernetes-scheduler/lib/jetty-http-9.2.3.v20140905.jar
 inflating: kubernetes-scheduler/lib/jetty-io-9.2.3.v20140905.jar
 inflating: kubernetes-scheduler/lib/jetty-util-9.2.3.v20140905.jar
  creating: kubernetes-scheduler/bin/
 inflating: kubernetes-scheduler/bin/kubernetes
 inflating: kubernetes-scheduler/bin/kubernetes.bat
Executing pre-exec command '{"arguments":["mesos-containerizer","mount","--help=false","--operation=make-rslave","--path=\/"],"shell":false,"value":"\/opt\/mesosphere\/active\/mesos\/libexec\/mesos\/mesos-containerizer"}'
Executing pre-exec command '{"shell":true,"value":"mount -n -t proc proc \/proc -o nosuid,noexec,nodev"}'
INFO 2017-12-06 17:43:45,542 [main] com.mesosphere.sdk.specification.yaml.RawServiceSpec$Builder:build(70): Rendered ServiceSpec from /var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/./kubernetes-scheduler/svc.yml:
Missing template values: [FRAMEWORK_NAME, KUBERNETES_VERSION@L398, CONFIG_TEMPLATE_PATH@L433, CONFIG_TEMPLATE_PATH@L461, TASK_NAME@L510, CONFIG_TEMPLATE_PATH@L525, TASK_NAME@L533, CONFIG_TEMPLATE_PATH@L546, TASK_NAME@L556, CONFIG_TEMPLATE_PATH@L569]
name: kubernetes
scheduler:
  principal: kubernetes
pods:
  etcd:
    count: 3
    allow-decommission: true
    placement: hostname:UNIQUE
    uris:
      - https://storage.googleapis.com/etcd/v3.2.9/etcd-v3.2.9-linux-amd64.tar.gz
    resource-sets:
      etcd:
        cpus: 0.5
        memory: 1024
        ports:
          peer:
            port: 2380
            env-key: ETCD_LISTEN_PEER_PORT
            vip:
              prefix: etcd-peer
              port: 2380
          client:
            port: 2379
            env-key: ETCD_LISTEN_CLIENT_PORT
            vip:
              prefix: etcd
              port: 2379
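        # NOTE: a DC/OS VIP declared with prefix <p> and port <n> is reachable
        # cluster-wide at <p>.<service-name>.l4lb.thisdcos.directory:<n>; the
        # etcdctl commands below rely on this for the client VIP, e.g.
        # https://etcd.kubernetes.l4lb.thisdcos.directory:2379.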
        volumes:
          data:
            path: "data-dir"
            type: ROOT
            size: 3072
          wal:
            path: "wal-pv"
            type: ROOT
            size: 512
    tasks:
      peer:
        goal: RUNNING
        kill-grace-period: 30
        resource-set: etcd
        # WARNING do not disable v2 API below with --enable-v2=false
        # otherwise the healthcheck will fail.
        #
        # TODO (@pires)
        # add the line below after https://github.com/coreos/etcd/issues/8268 is fixed and released
        #--peer-client-cert-auth
        cmd: |
          printf "\n\n ###### Starting etcd -- ${TASK_NAME} ###### \n"
          # In a graceful shutdown, we remove the peer from the cluster
          terminated () {
            printf "Removing member etcd-$POD_INSTANCE_INDEX-peer \n"
            DEAD_PEER_ID=$(
              ./etcd-v3.2.9-linux-amd64/etcdctl \
                --cert-file etcd-crt.pem \
                --key-file etcd-key.pem \
                --ca-file ca-crt.pem \
                --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk "/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\":\", \"\", \$1); print \$1 }" \
            )
            printf $DEAD_PEER_ID
            ./etcd-v3.2.9-linux-amd64/etcdctl \
              --cert-file etcd-crt.pem \
              --key-file etcd-key.pem \
              --ca-file ca-crt.pem \
              --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID
            rm -rf data-dir/member
            printf "Member etcd-$POD_INSTANCE_INDEX-peer removed!\n"
            exit 0
          }
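          # Register the graceful-shutdown handler defined above: on SIGTERM or
          # EXIT the peer deregisters itself from the cluster and wipes
          # data-dir/member, so a replacement instance can rejoin cleanly.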
          trap terminated SIGTERM EXIT
          echo "Trapping etcd SIGTERM and EXIT!"
          ./etcd-v3.2.9-linux-amd64/etcd \
            --name=infra$POD_INSTANCE_INDEX \
            --cert-file=etcd-crt.pem \
            --key-file=etcd-key.pem \
            --client-cert-auth \
            --trusted-ca-file=ca-crt.pem \
            --peer-cert-file=etcd-crt.pem \
            --peer-key-file=etcd-key.pem \
            --peer-trusted-ca-file=ca-crt.pem \
            --listen-peer-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_PEER_PORT \
            --initial-advertise-peer-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT \
            --listen-client-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_CLIENT_PORT \
            --advertise-client-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT \
            --log-output=stdout \
            --quota-backend-bytes=3221225472 \
            --election-timeout=5000 \
            --heartbeat-interval=250 \
            --initial-cluster infra0=https://etcd-0-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra1=https://etcd-1-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra2=https://etcd-2-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT
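        # NOTE: --quota-backend-bytes=3221225472 is exactly 3 GiB, matching the
        # 3072 MiB "data" volume above, and --election-timeout=5000 is 20x the
        # 250 ms heartbeat, leaving generous headroom for slow overlay networks.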
        env:
          ETCD_VERSION: v3.2.9
          ETCD_INITIAL_CLUSTER_TOKEN: kubernetes
          ETCD_DATA_DIR: data-dir
          ETCD_WAL_DIR: wal-pv/wal-dir
        health-check:
          cmd: >
            HEALTHY_PEER=$(./etcd-v3.2.9-linux-amd64/etcdctl
            --cert-file etcd-crt.pem
            --key-file etcd-key.pem
            --ca-file ca-crt.pem
            --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT
            cluster-health | grep "etcd-${POD_INSTANCE_INDEX}-peer" | grep -c 'is healthy') && [ "$HEALTHY_PEER" -eq "1" ]
          interval: 15
          grace-period: 60
          max-consecutive-failures: 4
          delay: 0
          timeout: 10
        readiness-check:
          cmd: >
            HEALTHY_CLUSTER=$(./etcd-v3.2.9-linux-amd64/etcdctl
            --cert-file etcd-crt.pem
            --key-file etcd-key.pem
            --ca-file ca-crt.pem
            --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT
            cluster-health | grep '^cluster ' | grep -c 'cluster is healthy') && [ "$HEALTHY_CLUSTER" -eq "1" ]
          interval: 30
          delay: 0
          timeout: 10
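      # NOTE: the health-check greps for this member's own "is healthy" line,
      # while the readiness-check requires the "cluster is healthy" summary, so
      # a peer only counts as ready once the whole ensemble has quorum.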
      recover:
        goal: FINISHED
        # ----
        # if this task is assigned the same resource-set as etcd-peer then the
        # resource-set VIPs will point to it when it is RUNNING and DC/OS will
        # assume etcd-peer is healthy when in fact it's not (no etcd is running,
        # just the recovery task). Uncomment next line when KUB-124 is fixed:
        # resource-set: etcd
        # ----
        # - determine dead peer from the new peer we're about to recover
        # - remove dead peer from member list
        # - add new peer to the member list
        cmd: >
          DEAD_PEER_ID=$(./etcd-v3.2.9-linux-amd64/etcdctl
          --cert-file etcd-crt.pem
          --key-file etcd-key.pem
          --ca-file ca-crt.pem
          --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk "/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\":\", \"\", \$1); print \$1 }")

          ./etcd-v3.2.9-linux-amd64/etcdctl
          --cert-file etcd-crt.pem
          --key-file etcd-key.pem
          --ca-file ca-crt.pem
          --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID

          rm -rf data-dir/member

          ./etcd-v3.2.9-linux-amd64/etcdctl
          --cert-file etcd-crt.pem
          --key-file etcd-key.pem
          --ca-file ca-crt.pem
          --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member add infra$POD_INSTANCE_INDEX https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT
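        # NOTE: etcd requires a dead member to be removed and explicitly
        # re-added before a replacement with the same name may join the
        # existing cluster, hence the remove/add pair above.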
        cpus: 0.1
        memory: 32
        env:
          ETCD_LISTEN_CLIENT_PORT: 2379
          ETCD_LISTEN_PEER_PORT: 2380
          ETCD_VERSION: v3.2.9
  kube-apiserver:
    count: 3
    allow-decommission: true
    placement: hostname:UNIQUE
    uris:
      - https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-apiserver
    tasks:
      instance:
        goal: RUNNING
        env:
          KUBERNETES_VERSION: v1.7.11
        cmd: >
          chmod +x kube-apiserver

          printf "\n\n ###### Starting Kube API SERVER -- ${TASK_NAME} ###### \n"

          ./kube-apiserver
          --etcd-servers=https://etcd-0-peer.kubernetes.mesos:2379,https://etcd-1-peer.kubernetes.mesos:2379,https://etcd-2-peer.kubernetes.mesos:2379
          --etcd-cafile=ca-crt.pem
          --etcd-certfile=kube-apiserver-crt.pem
          --etcd-keyfile=kube-apiserver-key.pem
          --etcd-prefix="/registry/cluster-0"
          --etcd-quorum-read
          --bind-address=$MESOS_CONTAINER_IP
          --insecure-bind-address=$MESOS_CONTAINER_IP
          --insecure-port=9000
          --secure-port=6443
          --apiserver-count=3
          --allow-privileged
          --service-cluster-ip-range=10.100.0.0/16
          --authorization-mode=AlwaysAllow
          --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
          --runtime-config=batch/v2alpha1=true,admissionregistration.k8s.io/v1alpha1=true
          --service-account-key-file=service-account-key.pem
          --tls-ca-file=ca-crt.pem
          --tls-cert-file=kube-apiserver-crt.pem
          --tls-private-key-file=kube-apiserver-key.pem
          --client-ca-file=ca-crt.pem
          --target-ram-mb=1024 2>&1
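        # NOTE: --etcd-prefix="/registry/cluster-0" namespaces this cluster's
        # keys inside the shared etcd ensemble, and --apiserver-count=3 matches
        # the pod count above so all replicas take part in endpoint reconciliation.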
        cpus: 0.5
        memory: 1024
        ports:
          apiserver:
            port: 6443
            env-key: KUBE_APISERVER_PORT
            vip:
              prefix: apiserver
              port: 6443
          apiserver_insecure:
            port: 9000
            env-key: KUBE_APISERVER_INSECURE_PORT
            vip:
              prefix: apiserver-insecure
              port: 9000
        health-check:
          cmd: >
            HTTP_CODE=$(/opt/mesosphere/bin/curl
            --silent --output /dev/null --fail --write-out "%{http_code}"
            --cert kube-apiserver-crt.pem
            --key kube-apiserver-key.pem
            --cacert ca-crt.pem
            https://kube-apiserver-$POD_INSTANCE_INDEX-instance.kubernetes.mesos:6443/healthz)
            && [ "$HTTP_CODE" -eq "200" ]
          interval: 15
          grace-period: 30
          max-consecutive-failures: 3
          delay: 0
          timeout: 10
  kube-controller-manager:
    count: 3
    allow-decommission: true
    placement: hostname:UNIQUE
    uris:
      - https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-controller-manager
    tasks:
      instance:
        goal: RUNNING
        cmd: >
          ./bootstrap --resolve=false 2>&1

          chmod +x kube-controller-manager

          printf "\n\n ###### Starting Kube CONTROLLER MANAGER -- ${TASK_NAME} ###### \n"

          ./kube-controller-manager
          --address=$MESOS_CONTAINER_IP
          --port=10252
          --kubeconfig=kubeconfig.yaml
          --root-ca-file=ca-crt.pem
          --service-account-private-key-file=service-account-key.pem
          --leader-elect 2>&1
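        # NOTE: with count: 3 and --leader-elect, only one controller-manager
        # replica actively reconciles at a time; the other two stand by for
        # failover via the leader-election lock held in the API server.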
        env:
          USERNAME: kube-controller-manager
          KUBERNETES_VERSION: v1.7.11
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
        cpus: 0.5
        memory: 512
        health-check:
          cmd: >
            HTTP_CODE=$(/opt/mesosphere/bin/curl
            --silent --output /dev/null --fail --write-out "%{http_code}"
            http://$MESOS_CONTAINER_IP:10252/healthz)
            && [ "$HTTP_CODE" -eq "200" ]
          interval: 15
          grace-period: 30
          max-consecutive-failures: 3
          delay: 0
          timeout: 10
        readiness-check:
          cmd: >
            HTTP_CODE=$(/opt/mesosphere/bin/curl
            --silent --output /dev/null --fail --write-out "%{http_code}"
            http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000)
            && [ "$HTTP_CODE" -eq "200" ]
          interval: 5
          delay: 0
          timeout: 10
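        # NOTE: readiness probes the apiserver-insecure VIP rather than the
        # local process, so this step only completes once the controller
        # manager can actually reach a live API server.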
  kube-scheduler:
    count: 3
    allow-decommission: true
    placement: hostname:UNIQUE
    uris:
      - https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-scheduler
    tasks:
      instance:
        goal: RUNNING
        cmd: >
          ./bootstrap --resolve=false 2>&1

          chmod +x kube-scheduler

          printf "\n\n ###### Starting Kube SCHEDULER -- ${TASK_NAME} ###### \n"

          ./kube-scheduler
          --address=$MESOS_CONTAINER_IP
          --kubeconfig=kubeconfig.yaml
          --leader-elect
          --kube-api-burst=120
          --kube-api-qps=80 2>&1
        env:
          USERNAME: kube-scheduler
          KUBERNETES_VERSION: v1.7.11
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
        cpus: 0.5
        memory: 512
        health-check:
          cmd: >
            HTTP_CODE=$(/opt/mesosphere/bin/curl
            --silent --output /dev/null --fail --write-out "%{http_code}"
            http://$MESOS_CONTAINER_IP:10251/healthz)
            && [ "$HTTP_CODE" -eq "200" ]
          interval: 15
          grace-period: 30
          max-consecutive-failures: 3
          delay: 0
          timeout: 10
  kube-node:
    count: 1
    allow-decommission: true
    placement: hostname:UNIQUE
    uris:
      - https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
      - https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz
      - https://downloads.mesosphere.com/kubernetes/socat/socat.d-1.7.3.2-2.fc26.tar.gz
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-proxy
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubelet
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl
      - https://infinity-artifacts.s3.amazonaws.com/autodelete7d/kubernetes/20171206-172217-Z5QGImH7dVzO3lwz/resource-container.gz
    resource-sets:
      kube-node-kubelet:
        cpus: 3.0
        memory: 4100
        volumes:
          var:
            path: "var"
            type: ROOT
            size: 1024
    tasks:
      kube-proxy:
        goal: RUNNING
        cmd: >
          ./bootstrap --resolve=false 2>&1

          chmod +x kube-proxy

          printf "\n\n ###### Starting Kube PROXY -- ${TASK_NAME} ###### \n"

          ./kube-proxy
          --hostname-override=kube-node-$POD_INSTANCE_INDEX-kube-proxy.kubernetes.mesos
          --bind-address=127.0.0.1
          --kubeconfig=kubeconfig.yaml
          --resource-container=""
          --healthz-port=0 2>&1
        env:
          PATH: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path"
          USERNAME: kube-node
          KUBERNETES_VERSION: v1.7.11
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
        cpus: 0.1
        memory: 512
      kubelet:
        goal: RUNNING
        kill-grace-period: 5
        resource-set: kube-node-kubelet
        cmd: |
          ./bootstrap --resolve=false 2>&1
          chmod +x kubelet-wrapper.sh
          printf "\n\n ###### Starting Kubelet -- ${TASK_NAME} ###### \n"
          ./kubelet-wrapper.sh 2>&1
        env:
          PATH: "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path"
          USERNAME: kube-node
          FRAMEWORK_NAME:
          PAUSE_DOCKER_IMAGE: "gcr.io/google_containers/pause-amd64:3.0"
          KUBE_ALLOCATABLE_CPUS: 2
          KUBE_ALLOCATABLE_MEM: 2048
          KUBE_RESERVED_CPUS: 1
          KUBE_RESERVED_MEM: 2052
          KUBERNETES_NODE_DOCKER_VERSION: 17.09.0
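        # NOTE: allocatable + reserved adds up to the kube-node-kubelet
        # resource-set: 2 + 1 CPUs = 3.0 cpus and 2048 + 2052 MiB = 4100 memory,
        # so Kubernetes workloads and node overhead split the task's budget.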
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
          kubelet-wrapper.sh:
            template: kubelet-wrapper.template.sh
            dest: kubelet-wrapper.sh
        health-check:
          cmd: >
            HTTP_CODE=$(/opt/mesosphere/bin/curl
            --silent --output /dev/null --fail --write-out "%{http_code}"
            http://$MESOS_CONTAINER_IP:10258/healthz)
            && [ "$HTTP_CODE" -eq "200" ]
          interval: 15
          grace-period: 30
          max-consecutive-failures: 3
          delay: 0
          timeout: 10
      decommission:
        goal: FINISHED
        cpus: 0.1
        memory: 32
        env:
          USERNAME: kube-node
          KUBERNETES_NODE_DOCKER_VERSION: 17.09.0
          KUBERNETES_VERSION: v1.7.11
        configs:
          kubeconfig:
            template: /kubeconfig.template.yaml
            dest: kubeconfig.yaml
        cmd: |
          printf "Starting to decommission the node...\n"
          ./bootstrap --resolve=false 2>&1 ;
          chmod +x kubectl ;
          NODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep "kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos" | grep -c "Ready")
          if [ "$IS_UPGRADE_PLAN" = "YES" ] && [ "$NODE_FOUND" -eq "1" ] ; then
            printf "Starting to decommission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \n" ;
            ./kubectl --kubeconfig=kubeconfig.yaml drain kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;
          else
            printf "Ignored the decommission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!" ;
          fi
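      # NOTE: decommission only drains the node when IS_UPGRADE_PLAN=YES and
      # the Node object is still Ready; outside an upgrade the step is a no-op,
      # so ad-hoc restarts do not evict every pod on the node.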
      commission:
        goal: FINISHED
        cpus: 0.1
        memory: 32
        env:
          USERNAME: kube-node
          KUBERNETES_NODE_DOCKER_VERSION: 17.09.0
          KUBERNETES_VERSION: v1.7.11
        configs:
          kubeconfig:
            template: /kubeconfig.template.yaml
            dest: kubeconfig.yaml
        cmd: |
          printf "Starting to commission the node...\n"
          ./bootstrap --resolve=false 2>&1 ;
          chmod +x kubectl ;
          NODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep -c "kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos")
          if [ "$IS_UPGRADE_PLAN" = "YES" ] && [ "$NODE_FOUND" -eq "1" ] ; then
            printf "Starting to commission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \n" ;
            ./kubectl --kubeconfig=kubeconfig.yaml uncordon kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;
          else
            printf "Ignored the commission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!" ;
          fi
      recover:
        goal: FINISHED
        resource-set: kube-node-kubelet
        cmd: |
          ./bootstrap --resolve=false 2>&1
          chmod +x kubectl
          ./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos
        env:
          USERNAME: kube-node
          KUBERNETES_VERSION: v1.7.11
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
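      # NOTE: recover deletes the stale Node object before the kubelet comes
      # back on a replacement agent, so the new instance registers cleanly
      # instead of fighting over the old node record.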
  mandatory-addons:
    count: 1
    uris:
      - https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip
      - https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl
    resource-sets:
      addons:
        cpus: 0.1
        memory: 32
    tasks:
      kube-dns:
        goal: FINISHED
        cmd: |
          ./bootstrap --resolve=false 2>&1
          TASK_NAME= ADDON=kube-dns bash ./install_addon.sh
        env:
          KUBERNETES_VERSION: v1.7.11
          KUBEDNS_DOCKER_IMAGE: gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7
          KUBEDNS_DNSMASQ_DOCKER_IMAGE: gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7
          KUBEDNS_SIDECAR_DOCKER_IMAGE: gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7
          USERNAME: mandatory-addons
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
          kubedns:
            template: kubedns.template.yaml
            dest: kube-dns.yaml
          install_addon.sh:
            template: /install_addon.template.sh
            dest: install_addon.sh
        resource-set: addons
      heapster:
        goal: FINISHED
        cmd: |
          ./bootstrap --resolve=false 2>&1
          TASK_NAME= ADDON=heapster bash ./install_addon.sh
        env:
          KUBERNETES_VERSION: v1.7.11
          HEAPSTER_DOCKER_IMAGE: gcr.io/google_containers/heapster-amd64:v1.4.3
          USERNAME: mandatory-addons
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
          heapster:
            template: heapster.template.yaml
            dest: heapster.yaml
          install_addon.sh:
            template: /install_addon.template.sh
            dest: install_addon.sh
        cpus: 0.1
        memory: 32
        resource-set: addons
      dashboard:
        goal: FINISHED
        cmd: |
          ./bootstrap --resolve=false 2>&1
          TASK_NAME= ADDON=dashboard bash ./install_addon.sh
        env:
          KUBERNETES_VERSION: v1.7.11
          DASHBOARD_DOCKER_IMAGE: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3
          USERNAME: mandatory-addons
        configs:
          kubeconfig:
            template: kubeconfig.template.yaml
            dest: kubeconfig.yaml
          dashboard:
            template: dashboard.template.yaml
            dest: dashboard.yaml
          install_addon.sh:
            template: /install_addon.template.sh
            dest: install_addon.sh
        resource-set: addons
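  # NOTE: each addon task renders its manifest from the matching template and
  # then runs install_addon.sh with ADDON selecting it; presumably the script
  # just kubectl-applies the rendered YAML against kubeconfig.yaml.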
plans:
  update:
    strategy: kubernetes-upgrade
    phases:
      etcd:
        strategy: serial
        pod: etcd
        steps:
          - default: [[peer]]
      apiserver:
        strategy: serial
        pod: kube-apiserver
        steps:
          - default: [[instance]]
      controller-manager:
        strategy: serial
        pod: kube-controller-manager
        steps:
          - default: [[instance]]
      scheduler:
        strategy: serial
        pod: kube-scheduler
        steps:
          - default: [[instance]]
      node:
        strategy: serial
        pod: kube-node
        steps:
          - default: [[decommission],[kube-proxy,kubelet],[commission]]
      mandatory-addons:
        strategy: serial
        pod: mandatory-addons
        steps:
          - default: [[kube-dns],[heapster],[dashboard]]
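      # NOTE: during an upgrade each node is handled as drain (decommission),
      # then kube-proxy/kubelet restart, then uncordon (commission), one node
      # at a time thanks to the serial strategy.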
  deploy:
    strategy: serial
    phases:
      etcd:
        strategy: parallel
        pod: etcd
        steps:
          - default: [[peer]]
      apiserver:
        strategy: parallel
        pod: kube-apiserver
        steps:
          - default: [[instance]]
      controller-manager:
        strategy: parallel
        pod: kube-controller-manager
        steps:
          - default: [[instance]]
      scheduler:
        strategy: parallel
        pod: kube-scheduler
        steps:
          - default: [[instance]]
      node:
        strategy: parallel
        pod: kube-node
        steps:
          - default: [[kube-proxy,kubelet]]
      mandatory-addons:
        strategy: serial
        pod: mandatory-addons
        steps:
          - default: [[kube-dns],[heapster],[dashboard]]
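  # NOTE: deploy brings each tier up with parallel phases since fresh pods can
  # start together, while the update plan above runs serially end to end to
  # avoid losing etcd quorum or draining more than one node at a time.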
  replace:
    strategy: serial
    phases:
      etcd:
        strategy: serial
        pod: etcd
        steps:
          - default: [[recover],[peer]]
      kube-node:
        strategy: serial
        pod: kube-node
        steps:
          - default: [[recover],[kube-proxy,kubelet]]
INFO 2017-12-06 17:43:45,980 [main] org.hibernate.validator.internal.util.Version:<clinit>(30): HV000001: Hibernate Validator 5.3.2.Final
INFO 2017-12-06 17:43:47,244 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='etcd-.*'}}'
INFO 2017-12-06 17:43:48,126 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-apiserver-.*'}}'
INFO 2017-12-06 17:43:48,536 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-controller-manager-.*'}}'
INFO 2017-12-06 17:43:48,939 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-scheduler-.*'}}'
INFO 2017-12-06 17:43:49,924 [main] com.mesosphere.sdk.offer.evaluate.placement.MarathonConstraintParser:parseRow(122): Marathon-style row '[hostname, UNIQUE]' resulted in placement rule: 'MaxPerHostnameRule{max=1, task-filter=RegexMatcher{pattern='kube-node-.*'}}'
INFO 2017-12-06 17:43:51,044 [main] org.apache.curator.framework.imps.CuratorFrameworkImpl:start(234): Starting
INFO 2017-12-06 17:43:51,128 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:zookeeper.version=3.4.6-1569965, built on 02/20/2014 09:09 GMT
INFO 2017-12-06 17:43:51,128 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:host.name=vm-kb5q.c.massive-bliss-781.internal
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.version=1.8.0_144
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.vendor=Oracle Corporation
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.home=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/jre1.8.0_144
INFO 2017-12-06 17:43:51,129 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.class.path=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/kubernetes.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/scheduler-master.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/executor-master.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mesos-1.4.0-rc1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/common-master.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-guava-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-jdk8-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-jsr310-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-dataformat-yaml-2.6.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-datatype-protobuf-0.9.3.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/annotations-3.0.1u2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-collections-3.2.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f6663
4909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-io-2.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/antlr4-runtime-4.5.1-1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.ws.rs-api-2.0.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-framework-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-recipes-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-test-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/httpclient-4.5.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/fluent-hc-4.5.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-lang3-3.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/protobuf-java-format-1.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/json-20160212.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/diffutils-1.3.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-container-jetty-http-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe
3706877b/kubernetes-scheduler/lib/jersey-container-servlet-core-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-json-jackson-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-multipart-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mimepull-1.9.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-servlet-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hibernate-validator-5.3.2.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.el-api-2.2.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.el-2.2.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/mesos-http-adapter-0.4.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/java-jwt-3.2.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/bcprov-jdk15on-1.57.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/bcpkix-jdk15on-1.57.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-core-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3
706877b/kubernetes-scheduler/lib/metrics-servlet-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-servlets-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_dropwizard-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_servlet-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics3-statsd-4.2.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/slf4j-api-1.7.25.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-core-2.8.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-slf4j-impl-2.8.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/compiler-0.9.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-codec-1.10.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/snakeyaml-1.15.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-annotations-2.6.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/curator-client-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-schedul
er/lib/zookeeper-3.4.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javassist-3.18.1-GA.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-math-2.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/httpcore-4.4.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/commons-logging-1.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.inject-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-continuation-9.1.1.v20140108.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-common-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-server-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-entity-filtering-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-jaxrs-base-2.5.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-jaxrs-json-provider-2.5.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-security-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler
/lib/validation-api-1.1.0.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jboss-logging-3.3.0.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/classmate-1.3.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/edu-umd-cs-findbugs-annotations-1.3.2-201002241900.nbm:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-over-slf4j-1.7.10.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jcl-over-slf4j-1.7.10.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/google-http-client-1.20.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-healthchecks-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-json-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/metrics-jvm-3.2.5.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/profiler-1.0.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/simpleclient_common-0.0.26.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kuberne
tes-scheduler/lib/metrics-statsd-common-4.2.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-api-2.8.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/log4j-1.2.16.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jline-0.9.94.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/netty-3.7.0.Final.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.servlet-api-3.1.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.annotation-api-1.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-guava-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-api-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-locator-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/osgi-resource-locator-1.0.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-client-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jersey-media-jaxb-2.23.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-module-jaxb-an
notations-2.5.4.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/annotations-1.3.2.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/joda-time-2.9.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/hk2-utils-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/aopalliance-repackaged-2.4.0-b34.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/javax.inject-1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/protobuf-java-3.3.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-databind-2.6.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/guava-18.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jcip-annotations-1.0.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-server-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jackson-core-2.6.6.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jsr305-3.0.1.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-http-9.2.3.v20140905.jar:/var/lib/mesos/slave
/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-io-9.2.3.v20140905.jar:/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/kubernetes-scheduler/lib/jetty-util-9.2.3.v20140905.jar | |
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.library.path=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b/libmesos-bundle/lib:/opt/mesosphere/lib:/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib | |
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.io.tmpdir=/tmp | |
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:java.compiler=<NA> | |
INFO 2017-12-06 17:43:51,131 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:os.name=Linux | |
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:os.arch=amd64 | |
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:os.version=4.12.7-coreos | |
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:user.name=root | |
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:user.home=/root | |
INFO 2017-12-06 17:43:51,132 [main] org.apache.zookeeper.ZooKeeper:logEnv(100): Client environment:user.dir=/var/lib/mesos/slave/slaves/66df60ae-ce98-4ce6-968a-f99922382ef3-S0/frameworks/66df60ae-ce98-4ce6-968a-f99922382ef3-0001/executors/kubernetes.eb803f00-daac-11e7-af0a-f66634909198/runs/6f3605e0-9e27-4133-a2af-bbbe3706877b | |
INFO 2017-12-06 17:43:51,133 [main] org.apache.zookeeper.ZooKeeper:<init>(438): Initiating client connection, connectString=master.mesos:2181 sessionTimeout=60000 watcher=org.apache.curator.ConnectionState@6330987e | |
INFO 2017-12-06 17:43:51,324 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:logStartConnect(975): Opening socket connection to server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181. Will not attempt to authenticate using SASL (unknown error) | |
INFO 2017-12-06 17:43:51,332 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:primeConnection(852): Socket connection established to vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, initiating session | |
INFO 2017-12-06 17:43:51,340 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:onConnected(1235): Session establishment complete on server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, sessionid = 0x1602c8203b400fd, negotiated timeout = 40000 | |
INFO 2017-12-06 17:43:51,344 [main-EventThread] org.apache.curator.framework.state.ConnectionStateManager:postState(228): State change: CONNECTED | |
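The connection sequence above (Starting, socket connection, session establishment, CONNECTED) is standard Curator 2.x behavior. A minimal sketch of the client setup that produces it; the connect string and session timeout are taken from the log, the retry policy is an assumption:

    // Minimal Curator setup matching the log above; the retry policy is a guess.
    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.CuratorFrameworkFactory;
    import org.apache.curator.retry.ExponentialBackoffRetry;

    public final class ZkClientSketch {
        public static CuratorFramework connect() {
            CuratorFramework client = CuratorFrameworkFactory.builder()
                    .connectString("master.mesos:2181")  // from the log
                    .sessionTimeoutMs(60000)             // from the log
                    .retryPolicy(new ExponentialBackoffRetry(1000, 3))  // assumption
                    .build();
            client.start();  // logs "Starting", then connects asynchronously
            return client;
        }
    }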
INFO 2017-12-06 17:43:52,830 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.SecretsClientTLSStore:cleanupTempSecrets(145): Deleting leftover temporary secrets. | |
INFO 2017-12-06 17:43:53,550 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.SecretsClientTLSStore:cleanupTempSecrets(165): Deleted leftover temporary secrets. | |
INFO 2017-12-06 17:43:54,136 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.TLSProvisioner:initializeServiceAccountKeyPair(69): Loaded valid service account keypair data. | |
INFO 2017-12-06 17:43:58,828 [main] com.mesosphere.sdk.kubernetes.scheduler.tls.TLSProvisioner:initialize(249): TLS provisioning is finished. | |
INFO 2017-12-06 17:43:58,829 [main] com.mesosphere.sdk.scheduler.SchedulerRunner:<init>(84): Build information: | |
- kubernetes: stub-universe, built 2017-12-06T17:22:21.321Z | |
- SDK: master/2e7ada1-dirty, built 2017-11-28T17:16:08.166Z | |
INFO 2017-12-06 17:43:58,843 [main] org.apache.curator.framework.imps.CuratorFrameworkImpl:start(234): Starting | |
INFO 2017-12-06 17:43:58,920 [main] org.apache.zookeeper.ZooKeeper:<init>(438): Initiating client connection, connectString=master.mesos:2181 sessionTimeout=60000 watcher=org.apache.curator.ConnectionState@2d63dd21 | |
INFO 2017-12-06 17:43:58,923 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:logStartConnect(975): Opening socket connection to server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181. Will not attempt to authenticate using SASL (unknown error) | |
INFO 2017-12-06 17:43:58,924 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:primeConnection(852): Socket connection established to vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, initiating session | |
INFO 2017-12-06 17:43:58,927 [main-SendThread(vm-xppt.c.massive-bliss-781.internal:2181)] org.apache.zookeeper.ClientCnxn:onConnected(1235): Session establishment complete on server vm-xppt.c.massive-bliss-781.internal/10.138.0.3:2181, sessionid = 0x1602c8203b400fe, negotiated timeout = 40000 | |
INFO 2017-12-06 17:43:58,928 [main] com.mesosphere.sdk.curator.CuratorUtils:lock(52): Acquiring ZK lock on /dcos-service-kubernetes/lock... | |
INFO 2017-12-06 17:43:58,928 [main-EventThread] org.apache.curator.framework.state.ConnectionStateManager:postState(228): State change: CONNECTED | |
INFO 2017-12-06 17:43:59,025 [main] com.mesosphere.sdk.curator.CuratorUtils:lock(60): 1/3 Lock acquired. | |
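The lock messages come from the SDK's CuratorUtils wrapper; the underlying mechanism is an ordinary Curator inter-process mutex on /dcos-service-kubernetes/lock, which keeps two schedulers for the same service from running at once. A sketch with stock Curator recipes; the three bounded attempts mirror the '1/3' in the message, the per-attempt timeout is an assumption:

    // Sketch of a bounded ZK lock acquisition with Curator recipes;
    // illustration only, not CuratorUtils itself.
    import org.apache.curator.framework.CuratorFramework;
    import org.apache.curator.framework.recipes.locks.InterProcessMutex;
    import java.util.concurrent.TimeUnit;

    final class ZkLockSketch {
        static boolean lock(CuratorFramework client) throws Exception {
            InterProcessMutex mutex =
                    new InterProcessMutex(client, "/dcos-service-kubernetes/lock");
            for (int attempt = 1; attempt <= 3; attempt++) {
                if (mutex.acquire(10, TimeUnit.SECONDS)) {  // timeout is an assumption
                    System.out.printf("%d/3 Lock acquired.%n", attempt);
                    return true;
                }
            }
            return false;
        }
    }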
INFO 2017-12-06 17:43:59,435 [main] com.mesosphere.sdk.storage.PersisterCache:getCache(149): Loaded data from persister: | |
ROOT: NULL | |
ConfigTarget: 36 bytes | |
Configurations: 0 bytes | |
dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5: 64287 bytes | |
FrameworkID: 43 bytes | |
Properties: 0 bytes | |
etcd-0-peer:task-status: 286 bytes | |
etcd-1-peer:task-status: 286 bytes | |
etcd-2-peer:task-status: 286 bytes | |
kube-apiserver-0-instance:task-status: 302 bytes | |
kube-apiserver-1-instance:task-status: 302 bytes | |
kube-apiserver-2-instance:task-status: 302 bytes | |
kube-controller-manager-0-instance:task-status: 328 bytes | |
kube-controller-manager-1-instance:task-status: 328 bytes | |
kube-controller-manager-2-instance:task-status: 328 bytes | |
kube-node-0-kube-proxy:task-status: 290 bytes | |
kube-node-0-kubelet:task-status: 291 bytes | |
kube-node-1-kube-proxy:task-status: 290 bytes | |
kube-node-1-kubelet:task-status: 291 bytes | |
kube-scheduler-0-instance:task-status: 302 bytes | |
kube-scheduler-1-instance:task-status: 302 bytes | |
kube-scheduler-2-instance:task-status: 302 bytes | |
mandatory-addons-0-dashboard:task-status: 303 bytes | |
mandatory-addons-0-heapster:task-status: 302 bytes | |
mandatory-addons-0-kube-dns:task-status: 302 bytes | |
SchemaVersion: 1 byte | |
Tasks: 10 bytes | |
etcd-0-peer: 10 bytes | |
TaskInfo: 6407 bytes | |
TaskStatus: 286 bytes | |
etcd-0-recover: 10 bytes | |
TaskInfo: 3098 bytes | |
etcd-1-peer: 10 bytes | |
TaskInfo: 6407 bytes | |
TaskStatus: 286 bytes | |
etcd-1-recover: 10 bytes | |
TaskInfo: 3098 bytes | |
etcd-2-peer: 10 bytes | |
TaskInfo: 6407 bytes | |
TaskStatus: 286 bytes | |
etcd-2-recover: 10 bytes | |
TaskInfo: 3098 bytes | |
kube-apiserver-0-instance: 10 bytes | |
TaskInfo: 4597 bytes | |
TaskStatus: 302 bytes | |
kube-apiserver-1-instance: 10 bytes | |
TaskInfo: 4597 bytes | |
TaskStatus: 302 bytes | |
kube-apiserver-2-instance: 10 bytes | |
TaskInfo: 4597 bytes | |
TaskStatus: 302 bytes | |
kube-controller-manager-0-instance: 10 bytes | |
TaskInfo: 4320 bytes | |
TaskStatus: 328 bytes | |
kube-controller-manager-1-instance: 10 bytes | |
TaskInfo: 4320 bytes | |
TaskStatus: 328 bytes | |
kube-controller-manager-2-instance: 10 bytes | |
TaskInfo: 4320 bytes | |
TaskStatus: 328 bytes | |
kube-node-0-commission: 10 bytes | |
TaskInfo: 3648 bytes | |
kube-node-0-decommission: 10 bytes | |
TaskInfo: 3674 bytes | |
kube-node-0-kube-proxy: 10 bytes | |
TaskInfo: 3369 bytes | |
TaskStatus: 290 bytes | |
kube-node-0-kubelet: 10 bytes | |
TaskInfo: 4560 bytes | |
TaskStatus: 291 bytes | |
kube-node-1-commission: 10 bytes | |
TaskInfo: 3648 bytes | |
kube-node-1-decommission: 10 bytes | |
TaskInfo: 3674 bytes | |
kube-node-1-kube-proxy: 10 bytes | |
TaskInfo: 3369 bytes | |
TaskStatus: 290 bytes | |
kube-node-1-kubelet: 10 bytes | |
TaskInfo: 4560 bytes | |
TaskStatus: 291 bytes | |
kube-scheduler-0-instance: 10 bytes | |
TaskInfo: 3227 bytes | |
TaskStatus: 302 bytes | |
kube-scheduler-1-instance: 10 bytes | |
TaskInfo: 3227 bytes | |
TaskStatus: 302 bytes | |
kube-scheduler-2-instance: 10 bytes | |
TaskInfo: 3227 bytes | |
TaskStatus: 302 bytes | |
mandatory-addons-0-dashboard: 10 bytes | |
TaskInfo: 3078 bytes | |
TaskStatus: 314 bytes | |
mandatory-addons-0-heapster: 10 bytes | |
TaskInfo: 3052 bytes | |
TaskStatus: 313 bytes | |
mandatory-addons-0-kube-dns: 10 bytes | |
TaskInfo: 3235 bytes | |
TaskStatus: 313 bytes | |
lock: 0 bytes | |
leases: 0 bytes | |
_c_e652ace0-f273-4d97-812b-4c4385b43537-lease-0000000013: 10 bytes | |
locks: 0 bytes | |
servicename: 10 bytes | |
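The dump above mirrors the znode tree the scheduler keeps under its service root: per-task TaskInfo/TaskStatus blobs, the ConfigTarget UUID and its configuration, the FrameworkID, and the Curator lock/lease nodes. The same view can be reproduced by walking the subtree with Curator; a sketch, assuming an already-started client as in the earlier snippet:

    // Sketch: recursively print "<node>: <n> bytes" for the service subtree.
    import org.apache.curator.framework.CuratorFramework;

    final class ZkTreeDumpSketch {
        static void dump(CuratorFramework client, String path, int depth) throws Exception {
            StringBuilder indent = new StringBuilder();
            for (int i = 0; i < depth; i++) {
                indent.append("  ");
            }
            byte[] data = client.getData().forPath(path);
            System.out.printf("%s%s: %d bytes%n", indent,
                    path.substring(path.lastIndexOf('/') + 1),
                    data == null ? 0 : data.length);
            for (String child : client.getChildren().forPath(path)) {
                dump(client, path + "/" + child, depth + 1);
            }
        }
        // e.g. dump(client, "/dcos-service-kubernetes", 0);
    }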
WARN 2017-12-06 17:43:59,721 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:43:59,721 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task etcd-0-recover: task.taskId=value: "" | |
, no status | |
WARN 2017-12-06 17:43:59,929 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
WARN 2017-12-06 17:43:59,929 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task etcd-1-recover: task.taskId=value: "" | |
, no status | |
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task etcd-2-recover: task.taskId=value: "" | |
, no status | |
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
WARN 2017-12-06 17:43:59,930 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-0-commission: task.taskId=value: "" | |
, no status | |
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-0-decommission: task.taskId=value: "" | |
, no status | |
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-1-commission: task.taskId=value: "" | |
, no status | |
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
WARN 2017-12-06 17:43:59,931 [main] com.mesosphere.sdk.state.StateStoreUtils:repairTaskIDs(191): Found StateStore status inconsistency for task kube-node-1-decommission: task.taskId=value: "" | |
, no status | |
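These warnings are the state-store consistency pass: tasks such as etcd-N-recover and kube-node-N-commission/decommission have a stored TaskInfo but an empty task ID and no TaskStatus, which is expected for transient steps that have never been launched under the current configuration, so the repair pass only logs them. A hypothetical paraphrase of the check, not the SDK's code:

    // Hypothetical paraphrase of the repairTaskIDs check implied by the
    // warnings above; illustration only.
    final class RepairSketch {
        static void check(String taskName, String taskId, boolean hasStatus) {
            if (!hasStatus && taskId.isEmpty()) {
                // transient steps (recover/commission/decommission) legitimately
                // lack a status until first launched: warn, do not fail
                System.out.println("WARN status inconsistency for task "
                        + taskName + ": empty taskId, no status");
            }
        }
    }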
INFO 2017-12-06 17:44:01,924 [main] com.mesosphere.sdk.state.ConfigStore:fetch(100): Fetching configuration with ID=dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5 from Configurations/dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5 | |
INFO 2017-12-06 17:44:03,130 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [recover] | |
INFO 2017-12-06 17:44:03,139 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,142 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
INFO 2017-12-06 17:44:03,221 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-0-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,221 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,225 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,226 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,227 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,227 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,228 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [recover] | |
INFO 2017-12-06 17:44:03,228 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
INFO 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-1-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,229 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,231 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,231 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,231 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,232 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [recover] | |
INFO 2017-12-06 17:44:03,232 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,234 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
INFO 2017-12-06 17:44:03,235 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-2-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,235 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,235 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,237 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,238 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,238 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [recover] | |
WARN 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.state.StateStore:fetchTask(290): No TaskInfo found for the requested name: kube-node-0-recover at: Tasks/kube-node-0-recover/TaskInfo | |
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,241 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:03,242 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,243 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [recover] | |
WARN 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.state.StateStore:fetchTask(290): No TaskInfo found for the requested name: kube-node-1-recover at: Tasks/kube-node-1-recover/TaskInfo | |
INFO 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,244 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:03,245 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,245 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,246 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,246 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,247 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,248 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,248 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,248 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,249 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,249 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,249 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,250 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,250 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,321 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,322 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,322 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,322 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,323 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,324 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,325 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,325 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,326 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,326 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,326 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,327 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,328 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,328 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,328 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,329 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,330 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,330 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,330 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,331 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,332 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,332 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,332 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [decommission] | |
INFO 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,333 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
INFO 2017-12-06 17:44:03,334 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-0-decommission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,334 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,334 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:03,335 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,335 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,335 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,336 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,336 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [commission] | |
INFO 2017-12-06 17:44:03,336 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
INFO 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-0-commission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,337 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [decommission] | |
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[decommission]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-1-decommission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[decommission]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,338 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:03,339 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,339 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [commission] | |
INFO 2017-12-06 17:44:03,340 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[commission]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
INFO 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-1-commission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[commission]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,341 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [kube-dns] | |
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-kube-dns' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,342 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [heapster] | |
INFO 2017-12-06 17:44:03,343 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,420 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:03,421 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-heapster' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,421 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,421 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [dashboard] | |
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-dashboard' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,422 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,424 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,425 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,426 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer] | |
INFO 2017-12-06 17:44:03,427 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,428 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,428 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,429 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,429 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,430 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,431 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,431 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,431 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,432 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,434 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,435 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,435 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,435 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,436 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,436 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,436 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,437 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,437 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,438 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,438 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,438 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,439 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,440 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,440 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,440 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:03,441 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,441 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,441 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,442 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:03,521 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-1, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:03,523 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,523 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-1-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-1:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [kube-dns] | |
INFO 2017-12-06 17:44:03,524 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-kube-dns' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [heapster] | |
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-heapster' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [dashboard] | |
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-dashboard' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:03,527 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:03,528 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:getPlans(375): Got 3 YAML plans: [replace, update, deploy] | |
INFO 2017-12-06 17:44:03,534 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:getDefaultScheduler(276): Marking deployment as having been previously completed | |
INFO 2017-12-06 17:44:03,539 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:updateConfig(461): Updating config with 8 validators... | |
INFO 2017-12-06 17:44:03,540 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(66): Loading current target configuration: dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5 | |
INFO 2017-12-06 17:44:03,624 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(78): New prospective config: | |
{ | |
"name" : "kubernetes", | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes", | |
"web-url" : null, | |
"zookeeper" : "master.mesos:2181", | |
"pod-specs" : [ { | |
"type" : "etcd", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://storage.googleapis.com/etcd/v3.2.9/etcd-v3.2.9-linux-amd64.tar.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "peer", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "etcd", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 1024.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 2380, | |
"end" : 2380 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "ETCD_LISTEN_PEER_PORT", | |
"port-name" : "peer", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "etcd-peer", | |
"vip-port" : 2380, | |
"network-names" : [ ], | |
"name" : "ports" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 2379, | |
"end" : 2379 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "ETCD_LISTEN_CLIENT_PORT", | |
"port-name" : "client", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "etcd", | |
"vip-port" : 2379, | |
"network-names" : [ ], | |
"name" : "ports" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "printf \"\\n\\n ###### Starting etcd -- ${TASK_NAME} ###### \\n\"\n\n# In a graceful shutdown, we remove the peer from the cluster\nterminated () {\n\n printf \"Removing member etcd-$POD_INSTANCE_INDEX-peer \\n\"\n\n DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n )\n\n printf $DEAD_PEER_ID\n\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\n\n rm -rf data-dir/member\n\n printf \"Member etcd-$POD_INSTANCE_INDEX-peer removed!\\n\"\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\n\necho \"Trapping etcd SIGTERM and EXIT!\"\n\n./etcd-v3.2.9-linux-amd64/etcd \\\n--name=infra$POD_INSTANCE_INDEX \\\n--cert-file=etcd-crt.pem \\\n--key-file=etcd-key.pem \\\n--client-cert-auth \\\n--trusted-ca-file=ca-crt.pem \\\n--peer-cert-file=etcd-crt.pem \\\n--peer-key-file=etcd-key.pem \\\n--peer-trusted-ca-file=ca-crt.pem \\\n--listen-peer-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_PEER_PORT \\\n--initial-advertise-peer-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT \\\n--listen-client-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_CLIENT_PORT \\\n--advertise-client-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT \\\n--log-output=stdout \\\n--quota-backend-bytes=3221225472 \\\n--election-timeout=5000 \\\n--heartbeat-interval=250 \\\n--initial-cluster infra0=https://etcd-0-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra1=https://etcd-1-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra2=https://etcd-2-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n", | |
"environment" : { | |
"ETCD_DATA_DIR" : "data-dir", | |
"ETCD_INITIAL_CLUSTER_TOKEN" : "kubernetes", | |
"ETCD_VERSION" : "v3.2.9", | |
"ETCD_WAL_DIR" : "wal-pv/wal-dir" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HEALTHY_PEER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep \"etcd-${POD_INSTANCE_INDEX}-peer\" | grep -c 'is healthy') && [ \"$HEALTHY_PEER\" -eq \"1\" ]\n", | |
"max-consecutive-failures" : 4, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 60 | |
}, | |
"readiness-check-spec" : { | |
"command" : "HEALTHY_CLUSTER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep '^cluster ' | grep -c 'cluster is healthy') && [ \"$HEALTHY_CLUSTER\" -eq \"1\" ]\n", | |
"delay" : 0, | |
"interval" : 30, | |
"timeout" : 10 | |
}, | |
"config-files" : [ ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 30, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "recover", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "recover-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n)\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\nrm -rf data-dir/member\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member add infra$POD_INSTANCE_INDEX https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n", | |
"environment" : { | |
"ETCD_LISTEN_CLIENT_PORT" : "2379", | |
"ETCD_LISTEN_PEER_PORT" : "2380", | |
"ETCD_VERSION" : "v3.2.9" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "etcd-.*" | |
} | |
}, | |
"volumes" : [ { | |
"@type" : "DefaultVolumeSpec", | |
"type" : "ROOT", | |
"container-path" : "data-dir", | |
"name" : "disk", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 3072.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultVolumeSpec", | |
"type" : "ROOT", | |
"container-path" : "wal-pv", | |
"name" : "disk", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/etcd_chain__tmp", | |
"env-key" : null, | |
"file" : "etcd-crt.pem" | |
}, { | |
"secret" : "kubernetes/etcd_private_key__tmp", | |
"env-key" : null, | |
"file" : "etcd-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-apiserver", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-apiserver", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "instance", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "instance-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 1024.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 6443, | |
"end" : 6443 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "KUBE_APISERVER_PORT", | |
"port-name" : "apiserver", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "apiserver", | |
"vip-port" : 6443, | |
"network-names" : [ ], | |
"name" : "ports" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 9000, | |
"end" : 9000 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "KUBE_APISERVER_INSECURE_PORT", | |
"port-name" : "apiserver_insecure", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "apiserver-insecure", | |
"vip-port" : 9000, | |
"network-names" : [ ], | |
"name" : "ports" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "\nchmod +x kube-apiserver\nprintf \"\\n\\n ###### Starting Kube API SERVER -- ${TASK_NAME} ###### \\n\"\n./kube-apiserver --etcd-servers=https://etcd-0-peer.kubernetes.mesos:2379,https://etcd-1-peer.kubernetes.mesos:2379,https://etcd-2-peer.kubernetes.mesos:2379 --etcd-cafile=ca-crt.pem --etcd-certfile=kube-apiserver-crt.pem --etcd-keyfile=kube-apiserver-key.pem --etcd-prefix=\"/registry/cluster-0\" --etcd-quorum-read --bind-address=$MESOS_CONTAINER_IP --insecure-bind-address=$MESOS_CONTAINER_IP --insecure-port=9000 --secure-port=6443 --apiserver-count=3 --allow-privileged --service-cluster-ip-range=10.100.0.0/16 --authorization-mode=AlwaysAllow --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds --runtime-config=batch/v2alpha1=true,admissionregistration.k8s.io/v1alpha1=true --service-account-key-file=service-account-key.pem --tls-ca-file=ca-crt.pem --tls-cert-file=kube-apiserver-crt.pem --tls-private-key-file=kube-apiserver-key.pem --client-ca-file=ca-crt.pem --target-ram-mb=1024 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" --cert kube-apiserver-crt.pem --key kube-apiserver-key.pem --cacert ca-crt.pem https://kube-apiserver-$POD_INSTANCE_INDEX-instance.kubernetes.mesos:6443/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : null, | |
"config-files" : [ ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-apiserver-.*" | |
} | |
}, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/service_account_private_key__tmp", | |
"env-key" : null, | |
"file" : "service-account-key.pem" | |
}, { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_apiserver_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-apiserver-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_apiserver_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-apiserver-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-controller-manager", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-controller-manager", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "instance", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "instance-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-controller-manager\nprintf \"\\n\\n ###### Starting Kube CONTROLLER MANAGER -- ${TASK_NAME} ###### \\n\"\n./kube-controller-manager --address=$MESOS_CONTAINER_IP --port=10252 --kubeconfig=kubeconfig.yaml --root-ca-file=ca-crt.pem --service-account-private-key-file=service-account-key.pem --leader-elect 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-controller-manager" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10252/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"delay" : 0, | |
"interval" : 5, | |
"timeout" : 10 | |
}, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-controller-manager-.*" | |
} | |
}, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/service_account_private_key__tmp", | |
"env-key" : null, | |
"file" : "service-account-key.pem" | |
}, { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_controller_manager_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-controller-manager-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_controller_manager_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-controller-manager-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-scheduler", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-scheduler", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "instance", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "instance-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-scheduler\nprintf \"\\n\\n ###### Starting Kube SCHEDULER -- ${TASK_NAME} ###### \\n\"\n./kube-scheduler --address=$MESOS_CONTAINER_IP --kubeconfig=kubeconfig.yaml --leader-elect --kube-api-burst=120 --kube-api-qps=80 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-scheduler" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10251/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-scheduler-.*" | |
} | |
}, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_scheduler_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-scheduler-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_scheduler_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-scheduler-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-node", | |
"user" : "root", | |
"count" : 1, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz", "https://downloads.mesosphere.com/kubernetes/socat/socat.d-1.7.3.2-2.fc26.tar.gz", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-proxy", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubelet", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://infinity-artifacts.s3.amazonaws.com/autodelete7d/kubernetes/20171206-172217-Z5QGImH7dVzO3lwz/resource-container.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "kube-proxy", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "kube-proxy-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-proxy\nprintf \"\\n\\n ###### Starting Kube PROXY -- ${TASK_NAME} ###### \\n\"\n./kube-proxy --hostname-override=kube-node-$POD_INSTANCE_INDEX-kube-proxy.kubernetes.mesos --bind-address=127.0.0.1 --kubeconfig=kubeconfig.yaml --resource-container=\"\" --healthz-port=0 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "kubelet", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "kube-node-kubelet", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 3.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 4100.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nchmod +x kubelet-wrapper.sh\n\nprintf \"\\n\\n ###### Starting Kubelet -- ${TASK_NAME} ###### \\n\"\n\n./kubelet-wrapper.sh 2>&1\n", | |
"environment" : { | |
"FRAMEWORK_NAME" : "", | |
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0", | |
"KUBE_ALLOCATABLE_CPUS" : "2", | |
"KUBE_ALLOCATABLE_MEM" : "2048", | |
"KUBE_RESERVED_CPUS" : "1", | |
"KUBE_RESERVED_MEM" : "2052", | |
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path", | |
"PAUSE_DOCKER_IMAGE" : "gcr.io/google_containers/pause-amd64:3.0", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10258/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "kubelet-wrapper.sh", | |
"relative-path" : "kubelet-wrapper.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\n# In a graceful shutdown, we delete the node from the cluster\nterminated () {\n chmod +x kubectl\n\n printf \"Deleting node kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos \\n\"\n ./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\necho \"Trapping SIGTERM and EXIT!\"\n\nprintf \"Configuring task environment...\\n\"\n\n[ -z \"$KUBE_ALLOCATABLE_CPUS\" ] && (printf \"Error: KUBE_ALLOCATABLE_CPUS not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_ALLOCATABLE_MEM\" ] && (printf \"Error: KUBELET_ALLOCATABLE_MEM not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_RESERVED_CPUS\" ] && (printf \"Error: KUBE_RESERVED_CPUS not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_RESERVED_MEM\" ] && (printf \"Error: KUBELET_RESERVED_MEM not set or empty!\" >&2 ; exit 1)\n\n# use the DC/OS proxy for dockerd and the kubelet\n# reading this file from /opt/mesosphere is a hack. Ideally these vars would be injected by mesos\nset -o allexport\nsource /opt/mesosphere/etc/proxy.env\nset +o allexport\n\n# The kubelet sees all of the host resources.\n# To override the resources it will advertise, we set the allocatable resources\n# as follows:\n# - reserved_cpu:\n# read total cpu available\n# subtract amount configured by user\n# convert to millicores format expected by kubelet\n# - reserved_mem is calculated as follows:\n# read total system memory in Kb\n# subtract memory the user configured (in Mb)\n#\nSYSTEM_RESERVED_CPUS=$(lscpu | awk -v requested=$KUBE_ALLOCATABLE_CPUS -v reserved=$KUBE_RESERVED_CPUS '/^CPU\\(s\\)/ {print ($NF - requested - reserved) * 1000}')\nSYSTEM_RESERVED_MEM=$(awk -v requested=$KUBE_ALLOCATABLE_MEM -v reserved=$KUBE_RESERVED_MEM '/MemTotal/ {print int(($2 - requested * 1000 - reserved * 1000))}' /proc/meminfo)\nCGROUP_ROOT=$(grep memory /proc/self/cgroup | cut -d: -f3)\n\n[ -z \"$CGROUP_ROOT\" ] && (printf \"Error: Unable to find CGROUP_ROOT!\" >&2 ; exit 1)\n\n# Docker must run with a few special arguments.\n# data-root is important, because it seems /var/lib/docker\n# mount bind we will be doing below doesn't work (at least)\n# on CentOS 7. 
FYI, it works fine (at least) on CoreOS.\nDOCKER_ARGS=(\n --iptables=false\n --ip-masq=false\n --cgroup-parent=${CGROUP_ROOT}\n --data-root=var/new/lib/docker\n)\n\n# For now, we enforce Docker storage driver to overlay2.\nDOCKER_ARGS+=(\n --storage-driver=overlay2\n --storage-opt=\"overlay2.override_kernel_check=true\"\n)\n\n# Before running the kubelet, we need to make sure it supports\n# the hairpin mode.\necho 1 > /proc/sys/net/bridge/bridge-nf-call-iptables\n\n# We need to convert this to millicores\nKUBE_RESERVED_CPUS_M=$((${KUBE_RESERVED_CPUS} * 1000))\nKUBE_RESERVED_MEM_M=$((${KUBE_RESERVED_MEM} * 1000))\n\n# Kubelet must run with a few special arguments.\n#\n# FRAMEWORK_NAME, KUBELET_CPUS and KUBELET_MEM are framework variables\n# set by the framework scheduler when processing the service spec.\nKUBELET_ARGS=(\n --address=$MESOS_CONTAINER_IP\n --hostname-override=kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos\n --node-ip=$MESOS_CONTAINER_IP\n --require-kubeconfig\n --allow-privileged\n --network-plugin=cni\n --cni-bin-dir=/opt/mesosphere/active/cni\n --cni-conf-dir=/opt/mesosphere/etc/dcos/network/cni\n --healthz-bind-address=$MESOS_CONTAINER_IP\n --healthz-port=10258\n --cluster-dns=10.100.0.10\n --cluster-domain=cluster.local\n --system-reserved=\"cpu=${SYSTEM_RESERVED_CPUS}m,memory=${SYSTEM_RESERVED_MEM}Ki\"\n --kube-reserved=\"cpu=${KUBE_RESERVED_CPUS_M}m,memory=${KUBE_RESERVED_MEM_M}Ki\"\n --cgroup-driver=cgroupfs\n --kube-api-qps=15\n --kube-api-burst=30\n --event-qps=15\n --event-burst=30\n --max-pods=100\n --cgroup-root=${CGROUP_ROOT}\n --pod-infra-container-image=$PAUSE_DOCKER_IMAGE\n --kubeconfig=kubeconfig.yaml\n --tls-cert-file=kube-node-crt.pem\n --tls-private-key-file=kube-node-key.pem\n --kube-reserved-cgroup=${CGROUP_ROOT}/podruntime\n)\n\nprintf \"Sandboxing...\\n\"\n\n# Since the persistent volume \"var\" may have been previously used by the same\n# task, we need to make sure it's empty before proceeding.\nrm -rf var/*\n\nDIRS=( containerd docker dockershim kubelet )\n\nfor DIR in \"${DIRS[@]}\"\ndo\n mkdir -p /var/lib/${DIR} var/new/lib/${DIR}\n mount --bind var/new/lib/${DIR} /var/lib/${DIR}\ndone\n\nmkdir -p var/new/log\nmount --bind var/new/log /var/log\n\n# Isolate docker daemon from any existing configuration in /etc/docker\nmkdir -p etc/docker/\nmount --bind etc/docker /etc/docker\n\n# Move mount /run to ./run and mount bind only the necessary paths (/run/dcos\n# and /run/mesos). This is done in order to present a clean /run to both dockerd\n# and the kubelet.\n\nmkdir -p run\nmount -n -t tmpfs tmpfs run\n\n# On CentOS it exits with a non 0 error code but move mount works anyway. 
?\\_(?)_/?\nmount -M /run/ run || true\n# Double-check if move mount worked.\n[ -d run/mesos ] || (printf \"Error: Mount move failed.\\n\" >&2 && exit 1)\n# Clean up /run before continuing and mount bind only what's necessary.\nrm -rf /run/*\n\nDIRS=( dcos mesos )\n\n# Ubuntu requires lxcfs\nif [ -d run/lxcfs ]; then\n DIRS+=( lxcfs )\nfi\n\nfor DIR in \"${DIRS[@]}\"\ndo\n mkdir /run/${DIR}\n mount --bind run/${DIR} /run/${DIR}\ndone\n\nprintf \"Configuring network...\\n\"\n\n# For now, we use DC/OS overlay CNI integration.\nCONFDIR=/opt/mesosphere/etc/dcos/network/cni\nif [ -f $CONFDIR/dcos.conf ]; then\n mv $CONFDIR/dcos.conf $CONFDIR/dcos.1.conf\n CNICONF='{\"name\":\"dcos\",'`cat ${CONFDIR}/dcos.1.conf | cut -d \"{\" -f3-5 | cut -d \"}\" -f1-4`\n echo ${CNICONF} > ${CONFDIR}/dcos.1.conf\nfi\n\n# socat is needed for kubectl port-forward.\ncat << EOF > socat.d/socat\n#! /bin/bash\nPATH=/usr/bin:/bin:/usr/sbin:/sbin:$(pwd)/socat.d/\nLD_LIBRARY_PATH=$(pwd)/socat.d/lib:$LD_LIBRARY_PATH exec $(pwd)/socat.d/bin/socat \"\\$@\"\nEOF\n\nchmod +x kubelet socat.d/socat\n\nprintf \"Starting docker...\\n\"\n\nPATH=$(pwd)/docker:$(pwd)/socat.d:$PATH ./docker/dockerd ${DOCKER_ARGS[@]} &\n\npid=$!\n\nprintf \"Setting up cgroups for docker pid=\"$pid\"\\n\"\n\n# Start dockerd and kubelet, add them to all cgroup subsystems available on the\n# system and wait for kubelet process to exit.\n\nchmod +x resource-container\n\n./resource-container --parent ${CGROUP_ROOT} --cgroup podruntime --cpus ${KUBE_RESERVED_CPUS} --mem ${KUBE_RESERVED_MEM} --pid $pid\n\nprintf \"Starting kubelet...\\n\"\n\nPATH=$(pwd)/docker:$(pwd)/socat.d:$PATH ./kubelet ${KUBELET_ARGS[@]} &\n\npid=$!\n\nprintf \"Setting up cgroups for kubelet pid=\"$pid\"\\n\"\n\n./resource-container --parent ${CGROUP_ROOT} --cgroup podruntime --cpus ${KUBE_RESERVED_CPUS} --mem ${KUBE_RESERVED_MEM} --pid $pid\n\nwait $pid\n" | |
} ], | |
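For reference, the system-reserved arithmetic in the kubelet wrapper above can be checked by hand. A minimal sketch, assuming a hypothetical 4-CPU host with 8388608 Ki of memory and the KUBE_* values this config ships further down (allocatable 2 CPUs / 2048 MB, reserved 1 CPU / 2052 MB):
  # Worked example of the wrapper's system-reserved math; the host values are assumptions.
  KUBE_ALLOCATABLE_CPUS=2; KUBE_RESERVED_CPUS=1
  KUBE_ALLOCATABLE_MEM=2048; KUBE_RESERVED_MEM=2052   # MB, from the task environment
  HOST_CPUS=4                                         # hypothetical: what lscpu 'CPU(s)' reports
  HOST_MEM_KB=8388608                                 # hypothetical: MemTotal from /proc/meminfo
  # (4 - 2 - 1) * 1000 = 1000 millicores left as --system-reserved cpu
  echo $(( (HOST_CPUS - KUBE_ALLOCATABLE_CPUS - KUBE_RESERVED_CPUS) * 1000 ))
  # 8388608 - 2048*1000 - 2052*1000 = 4288608 Ki left as --system-reserved memory
  # (the wrapper multiplies MB by 1000 rather than 1024, so the subtraction is approximate)
  echo $(( HOST_MEM_KB - KUBE_ALLOCATABLE_MEM * 1000 - KUBE_RESERVED_MEM * 1000 ))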
"discovery-spec" : null, | |
"kill-grace-period" : 5, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "decommission", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "decommission-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "\nprintf \"Starting to decommission the node...\\n\"\n\n./bootstrap --resolve=false 2>&1 ;\n\nchmod +x kubectl ;\n\nNODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep \"kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\" | grep -c \"Ready\")\n\nif [ \"$IS_UPGRADE_PLAN\" = \"YES\" ] && [ \"$NODE_FOUND\" -eq \"1\" ] ; then\n printf \"Starting to decommission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \\n\" ;\n ./kubectl --kubeconfig=kubeconfig.yaml drain kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;\nelse\n printf \"Ignored the decommission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!\" ;\nfi\n", | |
"environment" : { | |
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "commission", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "commission-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "\nprintf \"Starting to commission the node...\\n\"\n\n./bootstrap --resolve=false 2>&1 ;\n\nchmod +x kubectl ;\n\nNODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep -c \"kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\")\n\nif [ \"$IS_UPGRADE_PLAN\" = \"YES\" ] && [ \"$NODE_FOUND\" -eq \"1\" ] ; then\n printf \"Starting to commission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \\n\" ;\n ./kubectl --kubeconfig=kubeconfig.yaml uncordon kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;\nelse\n printf \"Ignored the commission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!\" ;\nfi\n", | |
"environment" : { | |
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "recover", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "kube-node-kubelet", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 3.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 4100.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nchmod +x kubectl\n\n./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-node-.*" | |
} | |
}, | |
"volumes" : [ { | |
"@type" : "DefaultVolumeSpec", | |
"type" : "ROOT", | |
"container-path" : "var", | |
"name" : "disk", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 1024.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-node-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-node-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "mandatory-addons", | |
"user" : "root", | |
"count" : 1, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "kube-dns", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "addons", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=kube-dns bash ./install_addon.sh\n", | |
"environment" : { | |
"KUBEDNS_DNSMASQ_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7", | |
"KUBEDNS_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7", | |
"KUBEDNS_SIDECAR_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "mandatory-addons" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "kubedns", | |
"relative-path" : "kube-dns.yaml", | |
"template-content" : "# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n addonmanager.kubernetes.io/mode: EnsureExists\ndata:\n upstreamNameservers: |\n [\"198.51.100.1\"]\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"KubeDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: 10.100.0.10\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n---\n\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\nspec:\n # We initially require 3 replicas to handle HA during upgrade operations.\n replicas: 3\n strategy:\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n volumes:\n - name: kube-dns-config\n configMap:\n name: kube-dns\n optional: true\n containers:\n - name: kubedns\n image: {{KUBEDNS_DOCKER_IMAGE}}\n resources:\n # TODO: Set memory limits when we've profiled the container for large\n # clusters, then set request = limit to keep this container in\n # guaranteed class. 
Currently, this container falls into the\n # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n livenessProbe:\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n # we poll on pod startup for the Kubernetes master service and\n # only setup the /readiness HTTP server once that's available.\n initialDelaySeconds: 3\n timeoutSeconds: 5\n args:\n - --domain=cluster.local.\n - --dns-port=10053\n - --config-dir=/kube-dns-config\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n volumeMounts:\n - name: kube-dns-config\n mountPath: /kube-dns-config\n - name: dnsmasq\n image: {{KUBEDNS_DNSMASQ_DOCKER_IMAGE}}\n livenessProbe:\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --no-negcache\n - --log-facility=-\n - --server=/cluster.local/127.0.0.1#10053\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/ip6.arpa/127.0.0.1#10053\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - name: kube-dns-config\n mountPath: /etc/k8s/dns/dnsmasq-nanny\n - name: sidecar\n image: {{KUBEDNS_SIDECAR_DOCKER_IMAGE}}\n livenessProbe:\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n memory: 20Mi\n cpu: 10m\n dnsPolicy: Default # Don't use cluster DNS.\n #serviceAccountName: kube-dns\n" | |
}, { | |
"name" : "install_addon.sh", | |
"relative-path" : "install_addon.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n" | |
} ], | |
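One quirk in install_addon.sh above: the script runs under set -e, so if either kubectl pipeline fails the shell exits before ever reaching the if [ $? -ne 0 ] test (and $? would only reflect the final get svc | grep anyway), which makes the failure message effectively dead code. A sketch of an equivalent check whose error path is actually reachable, assuming the same kubectl and kubeconfig layout:
  # Readiness check with a reachable failure branch; ADDON as in the original script.
  check_addon() {
    ./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep "$ADDON" | grep -qi running \
      && ./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep -q "$ADDON" \
      || { echo "$ADDON check failed and is not ready"; return 1; }
  }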
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "heapster", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "addons", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=heapster bash ./install_addon.sh\n", | |
"environment" : { | |
"HEAPSTER_DOCKER_IMAGE" : "gcr.io/google_containers/heapster-amd64:v1.4.3", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "mandatory-addons" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "heapster", | |
"relative-path" : "heapster.yaml", | |
"template-content" : "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: heapster\n namespace: kube-system\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: heapster\n namespace: kube-system\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n task: monitoring\n k8s-app: heapster\n spec:\n serviceAccountName: heapster\n containers:\n - name: heapster\n image: {{HEAPSTER_DOCKER_IMAGE}}\n imagePullPolicy: IfNotPresent\n command:\n - /heapster\n - --source=kubernetes.summary_api:https://kubernetes.default\n---\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n task: monitoring\n # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)\n # If you are NOT using this as an addon, you should comment out this line.\n kubernetes.io/cluster-service: 'true'\n kubernetes.io/name: Heapster\n name: heapster\n namespace: kube-system\nspec:\n ports:\n - port: 80\n targetPort: 8082\n selector:\n k8s-app: heapster\n" | |
}, { | |
"name" : "install_addon.sh", | |
"relative-path" : "install_addon.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "dashboard", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "addons", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=dashboard bash ./install_addon.sh\n", | |
"environment" : { | |
"DASHBOARD_DOCKER_IMAGE" : "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "mandatory-addons" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "dashboard", | |
"relative-path" : "dashboard.yaml", | |
"template-content" : "kind: Deployment\napiVersion: apps/v1beta1\nmetadata:\n labels:\n k8s-app: kubernetes-dashboard\n name: kubernetes-dashboard\n namespace: kube-system\nspec:\n replicas: 1\n revisionHistoryLimit: 10\n selector:\n matchLabels:\n k8s-app: kubernetes-dashboard\n template:\n metadata:\n labels:\n k8s-app: kubernetes-dashboard\n spec:\n containers:\n - name: kubernetes-dashboard\n image: {{DASHBOARD_DOCKER_IMAGE}}\n ports:\n - containerPort: 9090\n protocol: TCP\n args:\n # Uncomment the following line to manually specify Kubernetes API server Host\n # If not specified, Dashboard will attempt to auto discover the API server and connect\n # to it. Uncomment only if the default does not work.\n # - --apiserver-host=https://kubernetes.default\n livenessProbe:\n httpGet:\n path: /\n port: 9090\n initialDelaySeconds: 30\n timeoutSeconds: 30\n---\n\nkind: Service\napiVersion: v1\nmetadata:\n labels:\n k8s-app: kubernetes-dashboard\n name: kubernetes-dashboard\n namespace: kube-system\nspec:\n ports:\n - port: 80\n targetPort: 9090\n selector:\n k8s-app: kubernetes-dashboard\n" | |
}, { | |
"name" : "install_addon.sh", | |
"relative-path" : "install_addon.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : null, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_chain__tmp", | |
"env-key" : null, | |
"file" : "mandatory-addons-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_private_key__tmp", | |
"env-key" : null, | |
"file" : "mandatory-addons-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : false | |
} ], | |
"replacement-failure-policy" : null, | |
"user" : "root" | |
} | |
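Throughout this spec, the kubeconfig config-file is the same mustache-style template, with {{USERNAME}} filled from the USERNAME variable in each task's environment (kube-node, kube-scheduler, kube-controller-manager, mandatory-addons). The SDK performs the substitution itself when materializing config-files; as a rough illustration only, the rendering amounts to:
  # Illustrative stand-in for the scheduler's template rendering
  # (sed is an assumption here, not what the SDK uses internally).
  USERNAME=kube-node
  sed "s/{{USERNAME}}/${USERNAME}/g" kubeconfig.template.yaml > kubeconfig.yaml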
INFO 2017-12-06 17:44:04,225 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(92): Prior target config: | |
{ | |
"name" : "kubernetes", | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes", | |
"web-url" : null, | |
"zookeeper" : "master.mesos:2181", | |
"pod-specs" : [ { | |
"type" : "etcd", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://storage.googleapis.com/etcd/v3.2.9/etcd-v3.2.9-linux-amd64.tar.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "peer", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "etcd", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 1024.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 2380, | |
"end" : 2380 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "ETCD_LISTEN_PEER_PORT", | |
"port-name" : "peer", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "etcd-peer", | |
"vip-port" : 2380, | |
"network-names" : [ ], | |
"name" : "ports" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 2379, | |
"end" : 2379 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "ETCD_LISTEN_CLIENT_PORT", | |
"port-name" : "client", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "etcd", | |
"vip-port" : 2379, | |
"network-names" : [ ], | |
"name" : "ports" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "printf \"\\n\\n ###### Starting etcd -- ${TASK_NAME} ###### \\n\"\n\n# In a graceful shutdown, we remove the peer from the cluster\nterminated () {\n\n printf \"Removing member etcd-$POD_INSTANCE_INDEX-peer \\n\"\n\n DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n )\n\n printf $DEAD_PEER_ID\n\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\n\n rm -rf data-dir/member\n\n printf \"Member etcd-$POD_INSTANCE_INDEX-peer removed!\\n\"\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\n\necho \"Trapping etcd SIGTERM and EXIT!\"\n\n./etcd-v3.2.9-linux-amd64/etcd \\\n--name=infra$POD_INSTANCE_INDEX \\\n--cert-file=etcd-crt.pem \\\n--key-file=etcd-key.pem \\\n--client-cert-auth \\\n--trusted-ca-file=ca-crt.pem \\\n--peer-cert-file=etcd-crt.pem \\\n--peer-key-file=etcd-key.pem \\\n--peer-trusted-ca-file=ca-crt.pem \\\n--listen-peer-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_PEER_PORT \\\n--initial-advertise-peer-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT \\\n--listen-client-urls=https://$MESOS_CONTAINER_IP:$ETCD_LISTEN_CLIENT_PORT \\\n--advertise-client-urls=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT \\\n--log-output=stdout \\\n--quota-backend-bytes=3221225472 \\\n--election-timeout=5000 \\\n--heartbeat-interval=250 \\\n--initial-cluster infra0=https://etcd-0-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra1=https://etcd-1-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT,infra2=https://etcd-2-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n", | |
"environment" : { | |
"ETCD_DATA_DIR" : "data-dir", | |
"ETCD_INITIAL_CLUSTER_TOKEN" : "kubernetes", | |
"ETCD_VERSION" : "v3.2.9", | |
"ETCD_WAL_DIR" : "wal-pv/wal-dir" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HEALTHY_PEER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep \"etcd-${POD_INSTANCE_INDEX}-peer\" | grep -c 'is healthy') && [ \"$HEALTHY_PEER\" -eq \"1\" ]\n", | |
"max-consecutive-failures" : 4, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 60 | |
}, | |
"readiness-check-spec" : { | |
"command" : "HEALTHY_CLUSTER=$(./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints=https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_CLIENT_PORT cluster-health | grep '^cluster ' | grep -c 'cluster is healthy') && [ \"$HEALTHY_CLUSTER\" -eq \"1\" ]\n", | |
"delay" : 0, | |
"interval" : 30, | |
"timeout" : 10 | |
}, | |
"config-files" : [ ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 30, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "recover", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "recover-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "DEAD_PEER_ID=$(\n ./etcd-v3.2.9-linux-amd64/etcdctl \\\n --cert-file etcd-crt.pem \\\n --key-file etcd-key.pem \\\n --ca-file ca-crt.pem \\\n --endpoints=https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member list | awk \"/etcd-${POD_INSTANCE_INDEX}-peer/ { gsub(\\\":\\\", \\\"\\\", \\$1); print \\$1 }\" \\\n)\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member remove $DEAD_PEER_ID\nrm -rf data-dir/member\n./etcd-v3.2.9-linux-amd64/etcdctl --cert-file etcd-crt.pem --key-file etcd-key.pem --ca-file ca-crt.pem --endpoints https://etcd.kubernetes.l4lb.thisdcos.directory:$ETCD_LISTEN_CLIENT_PORT member add infra$POD_INSTANCE_INDEX https://etcd-$POD_INSTANCE_INDEX-peer.kubernetes.mesos:$ETCD_LISTEN_PEER_PORT\n", | |
"environment" : { | |
"ETCD_LISTEN_CLIENT_PORT" : "2379", | |
"ETCD_LISTEN_PEER_PORT" : "2380", | |
"ETCD_VERSION" : "v3.2.9" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "etcd-.*" | |
} | |
}, | |
"volumes" : [ { | |
"@type" : "DefaultVolumeSpec", | |
"type" : "ROOT", | |
"container-path" : "data-dir", | |
"name" : "disk", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 3072.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultVolumeSpec", | |
"type" : "ROOT", | |
"container-path" : "wal-pv", | |
"name" : "disk", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/etcd_chain__tmp", | |
"env-key" : null, | |
"file" : "etcd-crt.pem" | |
}, { | |
"secret" : "kubernetes/etcd_private_key__tmp", | |
"env-key" : null, | |
"file" : "etcd-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-apiserver", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-apiserver", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "instance", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "instance-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 1024.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 6443, | |
"end" : 6443 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "KUBE_APISERVER_PORT", | |
"port-name" : "apiserver", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "apiserver", | |
"vip-port" : 6443, | |
"network-names" : [ ], | |
"name" : "ports" | |
}, { | |
"@type" : "NamedVIPSpec", | |
"value" : { | |
"type" : "RANGES", | |
"scalar" : null, | |
"ranges" : { | |
"range" : [ { | |
"begin" : 9000, | |
"end" : 9000 | |
} ] | |
}, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes", | |
"env-key" : "KUBE_APISERVER_INSECURE_PORT", | |
"port-name" : "apiserver_insecure", | |
"protocol" : "tcp", | |
"visibility" : "CLUSTER", | |
"vip-name" : "apiserver-insecure", | |
"vip-port" : 9000, | |
"network-names" : [ ], | |
"name" : "ports" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
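The NamedVIPSpec entries above are what make https://apiserver.kubernetes.l4lb.thisdcos.directory:6443 (the server address in every kubeconfig template in this spec) load-balance across the three kube-apiserver instances, with the insecure VIP doing the same on port 9000. A hand probe of the secure VIP might look like the following sketch, reusing the task's certificate files and mirroring the health check defined below:
  # Hypothetical manual probe from inside the cluster; file paths as laid out in this task's sandbox.
  /opt/mesosphere/bin/curl --cacert ca-crt.pem \
    --cert kube-apiserver-crt.pem --key kube-apiserver-key.pem \
    https://apiserver.kubernetes.l4lb.thisdcos.directory:6443/healthz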
"command-spec" : { | |
"value" : "\nchmod +x kube-apiserver\nprintf \"\\n\\n ###### Starting Kube API SERVER -- ${TASK_NAME} ###### \\n\"\n./kube-apiserver --etcd-servers=https://etcd-0-peer.kubernetes.mesos:2379,https://etcd-1-peer.kubernetes.mesos:2379,https://etcd-2-peer.kubernetes.mesos:2379 --etcd-cafile=ca-crt.pem --etcd-certfile=kube-apiserver-crt.pem --etcd-keyfile=kube-apiserver-key.pem --etcd-prefix=\"/registry/cluster-0\" --etcd-quorum-read --bind-address=$MESOS_CONTAINER_IP --insecure-bind-address=$MESOS_CONTAINER_IP --insecure-port=9000 --secure-port=6443 --apiserver-count=3 --allow-privileged --service-cluster-ip-range=10.100.0.0/16 --authorization-mode=AlwaysAllow --admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds --runtime-config=batch/v2alpha1=true,admissionregistration.k8s.io/v1alpha1=true --service-account-key-file=service-account-key.pem --tls-ca-file=ca-crt.pem --tls-cert-file=kube-apiserver-crt.pem --tls-private-key-file=kube-apiserver-key.pem --client-ca-file=ca-crt.pem --target-ram-mb=1024 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" --cert kube-apiserver-crt.pem --key kube-apiserver-key.pem --cacert ca-crt.pem https://kube-apiserver-$POD_INSTANCE_INDEX-instance.kubernetes.mesos:6443/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : null, | |
"config-files" : [ ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-apiserver-.*" | |
} | |
}, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/service_account_private_key__tmp", | |
"env-key" : null, | |
"file" : "service-account-key.pem" | |
}, { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_apiserver_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-apiserver-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_apiserver_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-apiserver-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-controller-manager", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-controller-manager", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "instance", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "instance-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-controller-manager\nprintf \"\\n\\n ###### Starting Kube CONTROLLER MANAGER -- ${TASK_NAME} ###### \\n\"\n./kube-controller-manager --address=$MESOS_CONTAINER_IP --port=10252 --kubeconfig=kubeconfig.yaml --root-ca-file=ca-crt.pem --service-account-private-key-file=service-account-key.pem --leader-elect 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-controller-manager" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10252/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"delay" : 0, | |
"interval" : 5, | |
"timeout" : 10 | |
}, | |
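The readiness check here gates the controller-manager on the insecure apiserver VIP answering 200, retried every 5 seconds. As a plain loop, the polling would read roughly as follows (a sketch of what the interval/timeout fields imply, not code the service runs):
  # Wait until an apiserver answers on the insecure VIP; 5 s matches the check interval.
  until [ "$(/opt/mesosphere/bin/curl --silent --output /dev/null --write-out '%{http_code}' \
        http://apiserver-insecure.kubernetes.l4lb.thisdcos.directory:9000)" -eq 200 ]; do
    sleep 5
  done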
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-controller-manager-.*" | |
} | |
}, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/service_account_private_key__tmp", | |
"env-key" : null, | |
"file" : "service-account-key.pem" | |
}, { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_controller_manager_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-controller-manager-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_controller_manager_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-controller-manager-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-scheduler", | |
"user" : "root", | |
"count" : 3, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-scheduler", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "instance", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "instance-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.5 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-scheduler\nprintf \"\\n\\n ###### Starting Kube SCHEDULER -- ${TASK_NAME} ###### \\n\"\n./kube-scheduler --address=$MESOS_CONTAINER_IP --kubeconfig=kubeconfig.yaml --leader-elect --kube-api-burst=120 --kube-api-qps=80 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-scheduler" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10251/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-scheduler-.*" | |
} | |
}, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_scheduler_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-scheduler-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_scheduler_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-scheduler-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "kube-node", | |
"user" : "root", | |
"count" : 2, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://download.docker.com/linux/static/stable/x86_64/docker-17.09.0-ce.tgz", "https://downloads.mesosphere.com/kubernetes/socat/socat.d-1.7.3.2-2.fc26.tar.gz", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kube-proxy", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubelet", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://infinity-artifacts.s3.amazonaws.com/autodelete7d/kubernetes/20171206-172217-Z5QGImH7dVzO3lwz/resource-container.gz", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "kube-proxy", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "kube-proxy-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 512.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\nchmod +x kube-proxy\nprintf \"\\n\\n ###### Starting Kube PROXY -- ${TASK_NAME} ###### \\n\"\n./kube-proxy --hostname-override=kube-node-$POD_INSTANCE_INDEX-kube-proxy.kubernetes.mesos --bind-address=127.0.0.1 --kubeconfig=kubeconfig.yaml --resource-container=\"\" --healthz-port=0 2>&1\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "kubelet", | |
"goal" : "RUNNING", | |
"resource-set" : { | |
"id" : "kube-node-kubelet", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 3.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 4100.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nchmod +x kubelet-wrapper.sh\n\nprintf \"\\n\\n ###### Starting Kubelet -- ${TASK_NAME} ###### \\n\"\n\n./kubelet-wrapper.sh 2>&1\n", | |
"environment" : { | |
"FRAMEWORK_NAME" : "", | |
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0", | |
"KUBE_ALLOCATABLE_CPUS" : "2", | |
"KUBE_ALLOCATABLE_MEM" : "2048", | |
"KUBE_RESERVED_CPUS" : "1", | |
"KUBE_RESERVED_MEM" : "2052", | |
"PATH" : "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/opt/bin:/opt/mesosphere/bin/dcos-path", | |
"PAUSE_DOCKER_IMAGE" : "gcr.io/google_containers/pause-amd64:3.0", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : { | |
"command" : "HTTP_CODE=$(/opt/mesosphere/bin/curl --silent --output /dev/null --fail --write-out \"%{http_code}\" http://$MESOS_CONTAINER_IP:10258/healthz) && [ \"$HTTP_CODE\" -eq \"200\" ]\n", | |
"max-consecutive-failures" : 3, | |
"delay" : 0, | |
"interval" : 15, | |
"timeout" : 10, | |
"gracePeriod" : 30 | |
}, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "kubelet-wrapper.sh", | |
"relative-path" : "kubelet-wrapper.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\n# In a graceful shutdown, we delete the node from the cluster\nterminated () {\n chmod +x kubectl\n\n printf \"Deleting node kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos \\n\"\n ./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos\n\n exit 0\n}\n\ntrap terminated SIGTERM EXIT\necho \"Trapping SIGTERM and EXIT!\"\n\nprintf \"Configuring task environment...\\n\"\n\n[ -z \"$KUBE_ALLOCATABLE_CPUS\" ] && (printf \"Error: KUBE_ALLOCATABLE_CPUS not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_ALLOCATABLE_MEM\" ] && (printf \"Error: KUBELET_ALLOCATABLE_MEM not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_RESERVED_CPUS\" ] && (printf \"Error: KUBE_RESERVED_CPUS not set or empty!\" >&2 ; exit 1)\n[ -z \"$KUBE_RESERVED_MEM\" ] && (printf \"Error: KUBELET_RESERVED_MEM not set or empty!\" >&2 ; exit 1)\n\n# use the DC/OS proxy for dockerd and the kubelet\n# reading this file from /opt/mesosphere is a hack. Ideally these vars would be injected by mesos\nset -o allexport\nsource /opt/mesosphere/etc/proxy.env\nset +o allexport\n\n# The kubelet sees all of the host resources.\n# To override the resources it will advertise, we set the allocatable resources\n# as follows:\n# - reserved_cpu:\n# read total cpu available\n# subtract amount configured by user\n# convert to millicores format expected by kubelet\n# - reserved_mem is calculated as follows:\n# read total system memory in Kb\n# subtract memory the user configured (in Mb)\n#\nSYSTEM_RESERVED_CPUS=$(lscpu | awk -v requested=$KUBE_ALLOCATABLE_CPUS -v reserved=$KUBE_RESERVED_CPUS '/^CPU\\(s\\)/ {print ($NF - requested - reserved) * 1000}')\nSYSTEM_RESERVED_MEM=$(awk -v requested=$KUBE_ALLOCATABLE_MEM -v reserved=$KUBE_RESERVED_MEM '/MemTotal/ {print int(($2 - requested * 1000 - reserved * 1000))}' /proc/meminfo)\nCGROUP_ROOT=$(grep memory /proc/self/cgroup | cut -d: -f3)\n\n[ -z \"$CGROUP_ROOT\" ] && (printf \"Error: Unable to find CGROUP_ROOT!\" >&2 ; exit 1)\n\n# Docker must run with a few special arguments.\n# data-root is important, because it seems /var/lib/docker\n# mount bind we will be doing below doesn't work (at least)\n# on CentOS 7. 
FYI, it works fine (at least) on CoreOS.\nDOCKER_ARGS=(\n --iptables=false\n --ip-masq=false\n --cgroup-parent=${CGROUP_ROOT}\n --data-root=var/new/lib/docker\n)\n\n# For now, we enforce Docker storage driver to overlay2.\nDOCKER_ARGS+=(\n --storage-driver=overlay2\n --storage-opt=\"overlay2.override_kernel_check=true\"\n)\n\n# Before running the kubelet, we need to make sure it supports\n# the hairpin mode.\necho 1 > /proc/sys/net/bridge/bridge-nf-call-iptables\n\n# We need to convert this to millicores\nKUBE_RESERVED_CPUS_M=$((${KUBE_RESERVED_CPUS} * 1000))\nKUBE_RESERVED_MEM_M=$((${KUBE_RESERVED_MEM} * 1000))\n\n# Kubelet must run with a few special arguments.\n#\n# FRAMEWORK_NAME, KUBELET_CPUS and KUBELET_MEM are framework variables\n# set by the framework scheduler when processing the service spec.\nKUBELET_ARGS=(\n --address=$MESOS_CONTAINER_IP\n --hostname-override=kube-node-$POD_INSTANCE_INDEX-kubelet.$FRAMEWORK_NAME.mesos\n --node-ip=$MESOS_CONTAINER_IP\n --require-kubeconfig\n --allow-privileged\n --network-plugin=cni\n --cni-bin-dir=/opt/mesosphere/active/cni\n --cni-conf-dir=/opt/mesosphere/etc/dcos/network/cni\n --healthz-bind-address=$MESOS_CONTAINER_IP\n --healthz-port=10258\n --cluster-dns=10.100.0.10\n --cluster-domain=cluster.local\n --system-reserved=\"cpu=${SYSTEM_RESERVED_CPUS}m,memory=${SYSTEM_RESERVED_MEM}Ki\"\n --kube-reserved=\"cpu=${KUBE_RESERVED_CPUS_M}m,memory=${KUBE_RESERVED_MEM_M}Ki\"\n --cgroup-driver=cgroupfs\n --kube-api-qps=15\n --kube-api-burst=30\n --event-qps=15\n --event-burst=30\n --max-pods=100\n --cgroup-root=${CGROUP_ROOT}\n --pod-infra-container-image=$PAUSE_DOCKER_IMAGE\n --kubeconfig=kubeconfig.yaml\n --tls-cert-file=kube-node-crt.pem\n --tls-private-key-file=kube-node-key.pem\n --kube-reserved-cgroup=${CGROUP_ROOT}/podruntime\n)\n\nprintf \"Sandboxing...\\n\"\n\n# Since the persistent volume \"var\" may have been previously used by the same\n# task, we need to make sure it's empty before proceeding.\nrm -rf var/*\n\nDIRS=( containerd docker dockershim kubelet )\n\nfor DIR in \"${DIRS[@]}\"\ndo\n mkdir -p /var/lib/${DIR} var/new/lib/${DIR}\n mount --bind var/new/lib/${DIR} /var/lib/${DIR}\ndone\n\nmkdir -p var/new/log\nmount --bind var/new/log /var/log\n\n# Isolate docker daemon from any existing configuration in /etc/docker\nmkdir -p etc/docker/\nmount --bind etc/docker /etc/docker\n\n# Move mount /run to ./run and mount bind only the necessary paths (/run/dcos\n# and /run/mesos). This is done in order to present a clean /run to both dockerd\n# and the kubelet.\n\nmkdir -p run\nmount -n -t tmpfs tmpfs run\n\n# On CentOS it exits with a non 0 error code but move mount works anyway. 
?\\_(?)_/?\nmount -M /run/ run || true\n# Double-check if move mount worked.\n[ -d run/mesos ] || (printf \"Error: Mount move failed.\\n\" >&2 && exit 1)\n# Clean up /run before continuing and mount bind only what's necessary.\nrm -rf /run/*\n\nDIRS=( dcos mesos )\n\n# Ubuntu requires lxcfs\nif [ -d run/lxcfs ]; then\n DIRS+=( lxcfs )\nfi\n\nfor DIR in \"${DIRS[@]}\"\ndo\n mkdir /run/${DIR}\n mount --bind run/${DIR} /run/${DIR}\ndone\n\nprintf \"Configuring network...\\n\"\n\n# For now, we use DC/OS overlay CNI integration.\nCONFDIR=/opt/mesosphere/etc/dcos/network/cni\nif [ -f $CONFDIR/dcos.conf ]; then\n mv $CONFDIR/dcos.conf $CONFDIR/dcos.1.conf\n CNICONF='{\"name\":\"dcos\",'`cat ${CONFDIR}/dcos.1.conf | cut -d \"{\" -f3-5 | cut -d \"}\" -f1-4`\n echo ${CNICONF} > ${CONFDIR}/dcos.1.conf\nfi\n\n# socat is needed for kubectl port-forward.\ncat << EOF > socat.d/socat\n#! /bin/bash\nPATH=/usr/bin:/bin:/usr/sbin:/sbin:$(pwd)/socat.d/\nLD_LIBRARY_PATH=$(pwd)/socat.d/lib:$LD_LIBRARY_PATH exec $(pwd)/socat.d/bin/socat \"\\$@\"\nEOF\n\nchmod +x kubelet socat.d/socat\n\nprintf \"Starting docker...\\n\"\n\nPATH=$(pwd)/docker:$(pwd)/socat.d:$PATH ./docker/dockerd ${DOCKER_ARGS[@]} &\n\npid=$!\n\nprintf \"Setting up cgroups for docker pid=\"$pid\"\\n\"\n\n# Start dockerd and kubelet, add them to all cgroup subsystems available on the\n# system and wait for kubelet process to exit.\n\nchmod +x resource-container\n\n./resource-container --parent ${CGROUP_ROOT} --cgroup podruntime --cpus ${KUBE_RESERVED_CPUS} --mem ${KUBE_RESERVED_MEM} --pid $pid\n\nprintf \"Starting kubelet...\\n\"\n\nPATH=$(pwd)/docker:$(pwd)/socat.d:$PATH ./kubelet ${KUBELET_ARGS[@]} &\n\npid=$!\n\nprintf \"Setting up cgroups for kubelet pid=\"$pid\"\\n\"\n\n./resource-container --parent ${CGROUP_ROOT} --cgroup podruntime --cpus ${KUBE_RESERVED_CPUS} --mem ${KUBE_RESERVED_MEM} --pid $pid\n\nwait $pid\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 5, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "decommission", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "decommission-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "\nprintf \"Starting to decommission the node...\\n\"\n\n./bootstrap --resolve=false 2>&1 ;\n\nchmod +x kubectl ;\n\nNODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep \"kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\" | grep -c \"Ready\")\n\nif [ \"$IS_UPGRADE_PLAN\" = \"YES\" ] && [ \"$NODE_FOUND\" -eq \"1\" ] ; then\n printf \"Starting to decommission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \\n\" ;\n ./kubectl --kubeconfig=kubeconfig.yaml drain kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;\nelse\n printf \"Ignored the decommission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!\" ;\nfi\n", | |
"environment" : { | |
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "commission", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "commission-resource-set", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "\nprintf \"Starting to commission the node...\\n\"\n\n./bootstrap --resolve=false 2>&1 ;\n\nchmod +x kubectl ;\n\nNODE_FOUND=$(./kubectl --kubeconfig=kubeconfig.yaml get node --ignore-not-found kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos | grep -c \"kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\")\n\nif [ \"$IS_UPGRADE_PLAN\" = \"YES\" ] && [ \"$NODE_FOUND\" -eq \"1\" ] ; then\n printf \"Starting to commission the kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos -- ${TASK_NAME} \\n\" ;\n ./kubectl --kubeconfig=kubeconfig.yaml uncordon kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos ;\nelse\n printf \"Ignored the commission process of kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos!\" ;\nfi\n", | |
"environment" : { | |
"KUBERNETES_NODE_DOCKER_VERSION" : "17.09.0", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "recover", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "kube-node-kubelet", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 3.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 4100.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nchmod +x kubectl\n\n./kubectl --kubeconfig=kubeconfig.yaml delete node kube-node-$POD_INSTANCE_INDEX-kubelet.kubernetes.mesos\n", | |
"environment" : { | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "kube-node" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : { | |
"@type" : "MaxPerHostnameRule", | |
"max" : 1, | |
"task-filter" : { | |
"@type" : "RegexMatcher", | |
"pattern" : "kube-node-.*" | |
} | |
}, | |
"volumes" : [ { | |
"@type" : "DefaultVolumeSpec", | |
"type" : "ROOT", | |
"container-path" : "var", | |
"name" : "disk", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 1024.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_chain__tmp", | |
"env-key" : null, | |
"file" : "kube-node-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_private_key__tmp", | |
"env-key" : null, | |
"file" : "kube-node-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : true | |
}, { | |
"type" : "mandatory-addons", | |
"user" : "root", | |
"count" : 1, | |
"image" : null, | |
"networks" : [ ], | |
"rlimits" : [ ], | |
"uris" : [ "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/bootstrap.zip", "https://storage.googleapis.com/kubernetes-release/release/v1.7.11/bin/linux/amd64/kubectl", "https://s3.eu-central-1.amazonaws.com/dcos-kubernetes-test/executor.zip" ], | |
"task-specs" : [ { | |
"name" : "kube-dns", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "addons", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=kube-dns bash ./install_addon.sh\n", | |
"environment" : { | |
"KUBEDNS_DNSMASQ_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-dnsmasq-nanny-amd64:1.14.7", | |
"KUBEDNS_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-kube-dns-amd64:1.14.7", | |
"KUBEDNS_SIDECAR_DOCKER_IMAGE" : "gcr.io/google_containers/k8s-dns-sidecar-amd64:1.14.7", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "mandatory-addons" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "kubedns", | |
"relative-path" : "kube-dns.yaml", | |
"template-content" : "# Copyright 2016 The Kubernetes Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\napiVersion: v1\nkind: ConfigMap\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n addonmanager.kubernetes.io/mode: EnsureExists\ndata:\n upstreamNameservers: |\n [\"198.51.100.1\"]\n---\n\napiVersion: v1\nkind: Service\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\n kubernetes.io/name: \"KubeDNS\"\nspec:\n selector:\n k8s-app: kube-dns\n clusterIP: 10.100.0.10\n ports:\n - name: dns\n port: 53\n protocol: UDP\n - name: dns-tcp\n port: 53\n protocol: TCP\n---\n\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: kube-dns\n namespace: kube-system\n labels:\n k8s-app: kube-dns\n kubernetes.io/cluster-service: \"true\"\nspec:\n # We initially require 3 replicas to handle HA during upgrade operations.\n replicas: 3\n strategy:\n rollingUpdate:\n maxSurge: 10%\n maxUnavailable: 0\n selector:\n matchLabels:\n k8s-app: kube-dns\n template:\n metadata:\n labels:\n k8s-app: kube-dns\n annotations:\n scheduler.alpha.kubernetes.io/critical-pod: ''\n spec:\n tolerations:\n - key: \"CriticalAddonsOnly\"\n operator: \"Exists\"\n volumes:\n - name: kube-dns-config\n configMap:\n name: kube-dns\n optional: true\n containers:\n - name: kubedns\n image: {{KUBEDNS_DOCKER_IMAGE}}\n resources:\n # TODO: Set memory limits when we've profiled the container for large\n # clusters, then set request = limit to keep this container in\n # guaranteed class. 
Currently, this container falls into the\n # \"burstable\" category so the kubelet doesn't backoff from restarting it.\n limits:\n memory: 170Mi\n requests:\n cpu: 100m\n memory: 70Mi\n livenessProbe:\n httpGet:\n path: /healthcheck/kubedns\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n readinessProbe:\n httpGet:\n path: /readiness\n port: 8081\n scheme: HTTP\n # we poll on pod startup for the Kubernetes master service and\n # only setup the /readiness HTTP server once that's available.\n initialDelaySeconds: 3\n timeoutSeconds: 5\n args:\n - --domain=cluster.local.\n - --dns-port=10053\n - --config-dir=/kube-dns-config\n - --v=2\n env:\n - name: PROMETHEUS_PORT\n value: \"10055\"\n ports:\n - containerPort: 10053\n name: dns-local\n protocol: UDP\n - containerPort: 10053\n name: dns-tcp-local\n protocol: TCP\n - containerPort: 10055\n name: metrics\n protocol: TCP\n volumeMounts:\n - name: kube-dns-config\n mountPath: /kube-dns-config\n - name: dnsmasq\n image: {{KUBEDNS_DNSMASQ_DOCKER_IMAGE}}\n livenessProbe:\n httpGet:\n path: /healthcheck/dnsmasq\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - -v=2\n - -logtostderr\n - -configDir=/etc/k8s/dns/dnsmasq-nanny\n - -restartDnsmasq=true\n - --\n - -k\n - --cache-size=1000\n - --no-negcache\n - --log-facility=-\n - --server=/cluster.local/127.0.0.1#10053\n - --server=/in-addr.arpa/127.0.0.1#10053\n - --server=/ip6.arpa/127.0.0.1#10053\n ports:\n - containerPort: 53\n name: dns\n protocol: UDP\n - containerPort: 53\n name: dns-tcp\n protocol: TCP\n # see: https://github.com/kubernetes/kubernetes/issues/29055 for details\n resources:\n requests:\n cpu: 150m\n memory: 20Mi\n volumeMounts:\n - name: kube-dns-config\n mountPath: /etc/k8s/dns/dnsmasq-nanny\n - name: sidecar\n image: {{KUBEDNS_SIDECAR_DOCKER_IMAGE}}\n livenessProbe:\n httpGet:\n path: /metrics\n port: 10054\n scheme: HTTP\n initialDelaySeconds: 60\n timeoutSeconds: 5\n successThreshold: 1\n failureThreshold: 5\n args:\n - --v=2\n - --logtostderr\n - --probe=kubedns,127.0.0.1:10053,kubernetes.default.svc.cluster.local,5,SRV\n - --probe=dnsmasq,127.0.0.1:53,kubernetes.default.svc.cluster.local,5,SRV\n ports:\n - containerPort: 10054\n name: metrics\n protocol: TCP\n resources:\n requests:\n memory: 20Mi\n cpu: 10m\n dnsPolicy: Default # Don't use cluster DNS.\n #serviceAccountName: kube-dns\n" | |
}, { | |
"name" : "install_addon.sh", | |
"relative-path" : "install_addon.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "heapster", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "addons", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=heapster bash ./install_addon.sh\n", | |
"environment" : { | |
"HEAPSTER_DOCKER_IMAGE" : "gcr.io/google_containers/heapster-amd64:v1.4.3", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "mandatory-addons" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "heapster", | |
"relative-path" : "heapster.yaml", | |
"template-content" : "apiVersion: v1\nkind: ServiceAccount\nmetadata:\n name: heapster\n namespace: kube-system\n---\napiVersion: apps/v1beta1\nkind: Deployment\nmetadata:\n name: heapster\n namespace: kube-system\nspec:\n replicas: 1\n template:\n metadata:\n labels:\n task: monitoring\n k8s-app: heapster\n spec:\n serviceAccountName: heapster\n containers:\n - name: heapster\n image: {{HEAPSTER_DOCKER_IMAGE}}\n imagePullPolicy: IfNotPresent\n command:\n - /heapster\n - --source=kubernetes.summary_api:https://kubernetes.default\n---\napiVersion: v1\nkind: Service\nmetadata:\n labels:\n task: monitoring\n # For use as a Cluster add-on (https://github.com/kubernetes/kubernetes/tree/master/cluster/addons)\n # If you are NOT using this as an addon, you should comment out this line.\n kubernetes.io/cluster-service: 'true'\n kubernetes.io/name: Heapster\n name: heapster\n namespace: kube-system\nspec:\n ports:\n - port: 80\n targetPort: 8082\n selector:\n k8s-app: heapster\n" | |
}, { | |
"name" : "install_addon.sh", | |
"relative-path" : "install_addon.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
}, { | |
"name" : "dashboard", | |
"goal" : "FINISHED", | |
"resource-set" : { | |
"id" : "addons", | |
"resource-specifications" : [ { | |
"@type" : "DefaultResourceSpec", | |
"name" : "cpus", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 0.1 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
}, { | |
"@type" : "DefaultResourceSpec", | |
"name" : "mem", | |
"value" : { | |
"type" : "SCALAR", | |
"scalar" : { | |
"value" : 32.0 | |
}, | |
"ranges" : null, | |
"set" : null, | |
"text" : null | |
}, | |
"role" : "kubernetes-role", | |
"pre-reserved-role" : "*", | |
"principal" : "kubernetes" | |
} ], | |
"volume-specifications" : [ ], | |
"role" : "kubernetes-role", | |
"principal" : "kubernetes" | |
}, | |
"command-spec" : { | |
"value" : "./bootstrap --resolve=false 2>&1\n\nTASK_NAME= ADDON=dashboard bash ./install_addon.sh\n", | |
"environment" : { | |
"DASHBOARD_DOCKER_IMAGE" : "gcr.io/google_containers/kubernetes-dashboard-amd64:v1.6.3", | |
"KUBERNETES_VERSION" : "v1.7.11", | |
"USERNAME" : "mandatory-addons" | |
} | |
}, | |
"health-check-spec" : null, | |
"readiness-check-spec" : null, | |
"config-files" : [ { | |
"name" : "kubeconfig", | |
"relative-path" : "kubeconfig.yaml", | |
"template-content" : "apiVersion: v1\nkind: Config\nclusters:\n- name: local\n cluster:\n certificate-authority: ca-crt.pem\n server: https://apiserver.kubernetes.l4lb.thisdcos.directory:6443\nusers:\n- name: {{USERNAME}}\n user:\n client-certificate: {{USERNAME}}-crt.pem\n client-key: {{USERNAME}}-key.pem\ncontexts:\n- context:\n cluster: local\n user: {{USERNAME}}\n name: default\ncurrent-context: default\n" | |
}, { | |
"name" : "dashboard", | |
"relative-path" : "dashboard.yaml", | |
"template-content" : "kind: Deployment\napiVersion: apps/v1beta1\nmetadata:\n labels:\n k8s-app: kubernetes-dashboard\n name: kubernetes-dashboard\n namespace: kube-system\nspec:\n replicas: 1\n revisionHistoryLimit: 10\n selector:\n matchLabels:\n k8s-app: kubernetes-dashboard\n template:\n metadata:\n labels:\n k8s-app: kubernetes-dashboard\n spec:\n containers:\n - name: kubernetes-dashboard\n image: {{DASHBOARD_DOCKER_IMAGE}}\n ports:\n - containerPort: 9090\n protocol: TCP\n args:\n # Uncomment the following line to manually specify Kubernetes API server Host\n # If not specified, Dashboard will attempt to auto discover the API server and connect\n # to it. Uncomment only if the default does not work.\n # - --apiserver-host=https://kubernetes.default\n livenessProbe:\n httpGet:\n path: /\n port: 9090\n initialDelaySeconds: 30\n timeoutSeconds: 30\n---\n\nkind: Service\napiVersion: v1\nmetadata:\n labels:\n k8s-app: kubernetes-dashboard\n name: kubernetes-dashboard\n namespace: kube-system\nspec:\n ports:\n - port: 80\n targetPort: 9090\n selector:\n k8s-app: kubernetes-dashboard\n" | |
}, { | |
"name" : "install_addon.sh", | |
"relative-path" : "install_addon.sh", | |
"template-content" : "#!/bin/bash\n\nset -e\n\nchmod +x kubectl\n\nprintf \"\\n\\n ###### Deploying ${ADDON} components -- ${TASK_NAME} ###### \\n\"\n\n./kubectl --kubeconfig=kubeconfig.yaml apply -f $ADDON.yaml\nsleep 10\n./kubectl --kubeconfig=kubeconfig.yaml get pods -n kube-system | grep $ADDON | grep -i running && \\\n./kubectl --kubeconfig=kubeconfig.yaml get svc -n kube-system | grep $ADDON\nif [ $? -ne 0 ]\nthen\n echo \"$ADDON check failed and is not ready\"\n exit 1\nfi\n" | |
} ], | |
"discovery-spec" : null, | |
"kill-grace-period" : 0, | |
"transport-encryption" : [ ] | |
} ], | |
"placement-rule" : null, | |
"volumes" : [ ], | |
"pre-reserved-role" : "*", | |
"secrets" : [ { | |
"secret" : "kubernetes/ca_crt__tmp", | |
"env-key" : null, | |
"file" : "ca-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_chain__tmp", | |
"env-key" : null, | |
"file" : "mandatory-addons-crt.pem" | |
}, { | |
"secret" : "kubernetes/kube_node_private_key__tmp", | |
"env-key" : null, | |
"file" : "mandatory-addons-key.pem" | |
} ], | |
"share-pid-namespace" : false, | |
"allow-decommission" : false | |
} ], | |
"replacement-failure-policy" : null, | |
"user" : "root" | |
} | |
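As a reading aid for the kube-node launch script embedded in the ServiceSpec above, here is a minimal, self-contained bash sketch of its reservation arithmetic. The four KUBE_* input values below are illustrative assumptions (in a real deployment the scheduler injects them); the awk expressions and unit conversions mirror the script:

#!/bin/bash
# Assumed example inputs; the scheduler injects the real values.
KUBE_ALLOCATABLE_CPUS=2    # cores the kubelet may advertise to Kubernetes
KUBE_RESERVED_CPUS=1       # cores reserved for dockerd and the kubelet itself
KUBE_ALLOCATABLE_MEM=2048  # MB
KUBE_RESERVED_MEM=1024     # MB

# Host cores minus allocatable minus reserved, converted to millicores.
SYSTEM_RESERVED_CPUS=$(lscpu | awk -v requested=$KUBE_ALLOCATABLE_CPUS -v reserved=$KUBE_RESERVED_CPUS '/^CPU\(s\)/ {print ($NF - requested - reserved) * 1000}')

# Host memory (KiB, from /proc/meminfo) minus allocatable and reserved (MB, scaled by 1000 as in the script).
SYSTEM_RESERVED_MEM=$(awk -v requested=$KUBE_ALLOCATABLE_MEM -v reserved=$KUBE_RESERVED_MEM '/MemTotal/ {print int($2 - requested * 1000 - reserved * 1000)}' /proc/meminfo)

# The reservations themselves, in kubelet units.
KUBE_RESERVED_CPUS_M=$((KUBE_RESERVED_CPUS * 1000))  # cores -> millicores
KUBE_RESERVED_MEM_M=$((KUBE_RESERVED_MEM * 1000))    # MB -> KiB (the script scales by 1000, not 1024)

echo "--system-reserved=cpu=${SYSTEM_RESERVED_CPUS}m,memory=${SYSTEM_RESERVED_MEM}Ki"
echo "--kube-reserved=cpu=${KUBE_RESERVED_CPUS_M}m,memory=${KUBE_RESERVED_MEM_M}Ki"

On an assumed 8-core host these inputs yield --system-reserved=cpu=5000m and --kube-reserved=cpu=1000m,memory=1024000Ki, matching the flags assembled into KUBELET_ARGS above.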
INFO 2017-12-06 17:44:04,339 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:printConfigDiff(252): Difference between configs: | |
--- ServiceSpec.old | |
+++ ServiceSpec.new | |
@@ -599,5 +599,5 @@ | |
"type" : "kube-node", | |
"user" : "root", | |
- "count" : 2, | |
+ "count" : 1, | |
"image" : null, | |
"networks" : [ ], | |
INFO 2017-12-06 17:44:04,625 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:updateConfiguration(126): Updating target configuration: Prior target configuration 'dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5' is different from new configuration '7ccbbf67-edeb-4983-996e-a1aed63adeb1'. | |
INFO 2017-12-06 17:44:04,641 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'etcd-0-peer' is up to date: PodSpec 'etcd' is the same | |
INFO 2017-12-06 17:44:04,648 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'etcd-0-recover' is up to date: PodSpec 'etcd' is the same | |
INFO 2017-12-06 17:44:04,720 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'etcd-1-peer' is up to date: PodSpec 'etcd' is the same | |
INFO 2017-12-06 17:44:04,722 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'etcd-1-recover' is up to date: PodSpec 'etcd' is the same | |
INFO 2017-12-06 17:44:04,722 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'etcd-2-peer' is up to date: PodSpec 'etcd' is the same | |
INFO 2017-12-06 17:44:04,723 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'etcd-2-recover' is up to date: PodSpec 'etcd' is the same | |
INFO 2017-12-06 17:44:04,724 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-apiserver-0-instance' is up to date: PodSpec 'kube-apiserver' is the same | |
INFO 2017-12-06 17:44:04,725 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-apiserver-1-instance' is up to date: PodSpec 'kube-apiserver' is the same | |
INFO 2017-12-06 17:44:04,725 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-apiserver-2-instance' is up to date: PodSpec 'kube-apiserver' is the same | |
INFO 2017-12-06 17:44:04,726 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-controller-manager-0-instance' is up to date: PodSpec 'kube-controller-manager' is the same | |
INFO 2017-12-06 17:44:04,727 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-controller-manager-1-instance' is up to date: PodSpec 'kube-controller-manager' is the same | |
INFO 2017-12-06 17:44:04,727 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-controller-manager-2-instance' is up to date: PodSpec 'kube-controller-manager' is the same | |
INFO 2017-12-06 17:44:04,928 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-0-commission' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:05,128 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-0-decommission' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:05,325 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-0-kube-proxy' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:05,445 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-0-kubelet' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:05,644 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-1-commission' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:05,839 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-1-decommission' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:06,033 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-1-kube-proxy' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:06,227 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-node-1-kubelet' is up to date: PodSpec 'kube-node' is the same | |
INFO 2017-12-06 17:44:06,228 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-scheduler-0-instance' is up to date: PodSpec 'kube-scheduler' is the same | |
INFO 2017-12-06 17:44:06,228 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-scheduler-1-instance' is up to date: PodSpec 'kube-scheduler' is the same | |
INFO 2017-12-06 17:44:06,229 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'kube-scheduler-2-instance' is up to date: PodSpec 'kube-scheduler' is the same | |
INFO 2017-12-06 17:44:06,230 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'mandatory-addons-0-dashboard' is up to date: PodSpec 'mandatory-addons' is the same | |
INFO 2017-12-06 17:44:06,230 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'mandatory-addons-0-heapster' is up to date: PodSpec 'mandatory-addons' is the same | |
INFO 2017-12-06 17:44:06,231 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:needsConfigUpdate(300): Task 'mandatory-addons-0-kube-dns' is up to date: PodSpec 'mandatory-addons' is the same | |
INFO 2017-12-06 17:44:06,231 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:cleanupDuplicateAndUnusedConfigs(224): Updating 26 tasks in StateStore with target configuration ID 7ccbbf67-edeb-4983-996e-a1aed63adeb1 | |
INFO 2017-12-06 17:44:06,421 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:cleanupDuplicateAndUnusedConfigs(230): Testing deserialization of 2 listed configurations before cleanup: | |
INFO 2017-12-06 17:44:06,421 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:cleanupDuplicateAndUnusedConfigs(234): - 7ccbbf67-edeb-4983-996e-a1aed63adeb1: OK | |
INFO 2017-12-06 17:44:06,421 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:cleanupDuplicateAndUnusedConfigs(234): - dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5: OK | |
INFO 2017-12-06 17:44:06,421 [main] com.mesosphere.sdk.config.DefaultConfigurationUpdater:clearConfigsNotListed(339): Cleaning up 1 unused configs: [dd27fcb7-4095-4c86-8f9d-b5aa708a6ca5] | |
INFO 2017-12-06 17:44:06,424 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [recover] | |
INFO 2017-12-06 17:44:06,425 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:06,425 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
INFO 2017-12-06 17:44:06,425 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-0-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,425 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,426 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer] | |
INFO 2017-12-06 17:44:06,426 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,426 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,426 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,427 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [recover] | |
INFO 2017-12-06 17:44:06,427 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:06,427 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
INFO 2017-12-06 17:44:06,427 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-1-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,428 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,428 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer] | |
INFO 2017-12-06 17:44:06,428 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,429 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,429 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,429 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [recover] | |
INFO 2017-12-06 17:44:06,429 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:06,429 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
INFO 2017-12-06 17:44:06,430 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'etcd-2-recover' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,430 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,430 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer] | |
INFO 2017-12-06 17:44:06,430 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,431 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,431 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,431 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [recover] | |
WARN 2017-12-06 17:44:06,431 [main] com.mesosphere.sdk.state.StateStore:fetchTask(290): No TaskInfo found for the requested name: kube-node-0-recover at: Tasks/kube-node-0-recover/TaskInfo | |
INFO 2017-12-06 17:44:06,431 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,431 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[recover]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,432 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:06,432 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,433 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,433 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer] | |
INFO 2017-12-06 17:44:06,434 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,434 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,434 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,434 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer] | |
INFO 2017-12-06 17:44:06,435 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,435 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,435 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,435 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer] | |
INFO 2017-12-06 17:44:06,436 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,436 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,436 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,436 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,437 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,437 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,437 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,437 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,438 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,438 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,438 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,438 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,439 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,439 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,439 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,439 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,440 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,440 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,440 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,440 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,441 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,441 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,441 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,441 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,442 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,520 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,521 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,521 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-0, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,521 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,522 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,522 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-1, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,522 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,523 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,523 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,523 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-2, with tasks: [instance] | |
INFO 2017-12-06 17:44:06,523 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,523 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,524 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,524 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [decommission] | |
INFO 2017-12-06 17:44:06,524 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:06,524 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
INFO 2017-12-06 17:44:06,524 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-0-decommission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,525 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet] | |
INFO 2017-12-06 17:44:06,525 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,525 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,526 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,526 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [commission] | |
INFO 2017-12-06 17:44:06,526 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: PENDING (interrupted=false) | |
WARN 2017-12-06 17:44:06,527 [main] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
INFO 2017-12-06 17:44:06,527 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(140): Deployment of task 'kube-node-0-commission' is PENDING: onTarget=true reachedGoal=false permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,527 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,527 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [kube-dns] | |
INFO 2017-12-06 17:44:06,527 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,528 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:06,528 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-kube-dns' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,528 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,528 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [heapster] | |
INFO 2017-12-06 17:44:06,528 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,529 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal. | |
INFO 2017-12-06 17:44:06,529 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-heapster' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false | |
INFO 2017-12-06 17:44:06,529 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: COMPLETE (interrupted=false) | |
INFO 2017-12-06 17:44:06,529 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [dashboard] | |
INFO 2017-12-06 17:44:06,529 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: PENDING (interrupted=false) | |
INFO 2017-12-06 17:44:06,529 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:06,530 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-dashboard' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,530 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,530 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-0, with tasks: [peer]
INFO 2017-12-06 17:44:06,531 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,531 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-0-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,531 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-0:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,531 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-1, with tasks: [peer]
INFO 2017-12-06 17:44:06,532 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,532 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-1-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,532 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-1:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,532 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: etcd-2, with tasks: [peer]
INFO 2017-12-06 17:44:06,532 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,533 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'etcd-2-peer' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,533 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): etcd-2:[peer]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,533 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-0, with tasks: [instance]
INFO 2017-12-06 17:44:06,533 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,534 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,534 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,534 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-1, with tasks: [instance]
INFO 2017-12-06 17:44:06,534 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,534 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,535 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,535 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-apiserver-2, with tasks: [instance]
INFO 2017-12-06 17:44:06,535 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,535 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-apiserver-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,535 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-apiserver-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,536 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-0, with tasks: [instance]
INFO 2017-12-06 17:44:06,536 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,536 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,536 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,536 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-1, with tasks: [instance]
INFO 2017-12-06 17:44:06,537 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,537 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,537 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,537 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-controller-manager-2, with tasks: [instance]
INFO 2017-12-06 17:44:06,538 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,538 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-controller-manager-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,538 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-controller-manager-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,538 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-0, with tasks: [instance]
INFO 2017-12-06 17:44:06,538 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,539 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-0-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,539 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-0:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,539 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-1, with tasks: [instance]
INFO 2017-12-06 17:44:06,539 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,540 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-1-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,540 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-1:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,540 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-scheduler-2, with tasks: [instance]
INFO 2017-12-06 17:44:06,540 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,540 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-scheduler-2-instance' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,540 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-scheduler-2:[instance]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,541 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: kube-node-0, with tasks: [kube-proxy, kubelet]
INFO 2017-12-06 17:44:06,541 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,541 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kube-proxy' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,542 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'kube-node-0-kubelet' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,542 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[kube-proxy, kubelet]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,542 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [kube-dns]
INFO 2017-12-06 17:44:06,542 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,542 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:06,543 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-kube-dns' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,543 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[kube-dns]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,543 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [heapster]
INFO 2017-12-06 17:44:06,543 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,543 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:06,543 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-heapster' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,544 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[heapster]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,544 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStep(40): Generating step for pod: mandatory-addons-0, with tasks: [dashboard]
INFO 2017-12-06 17:44:06,544 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: PENDING (interrupted=false)
INFO 2017-12-06 17:44:06,544 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(127): Automatically on target configuration due to having reached FINISHED goal.
INFO 2017-12-06 17:44:06,544 [main] com.mesosphere.sdk.scheduler.plan.DefaultStepFactory:getStatus(137): Deployment of task 'mandatory-addons-0-dashboard' is COMPLETE: onTarget=true reachedGoal=true permanentlyFailed=false
INFO 2017-12-06 17:44:06,544 [main] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): mandatory-addons-0:[dashboard]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:06,545 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:getPlans(375): Got 3 YAML plans: [replace, update, deploy]
INFO 2017-12-06 17:44:06,620 [main] com.mesosphere.sdk.scheduler.SchedulerBuilder:selectDeployPlan(429): Overriding deploy plan with custom update plan. (Has completed deployment: true, Custom update plan defined: true)
INFO 2017-12-06 17:44:07,232 [main] org.eclipse.jetty.util.log:initialized(188): Logging initialized @23035ms
INFO 2017-12-06 17:44:09,243 [Thread-3] com.mesosphere.sdk.scheduler.SchedulerApiServer:run(80): Starting API server at port 15488
INFO 2017-12-06 17:44:09,245 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers...
INFO 2017-12-06 17:44:09,247 [Thread-3] org.eclipse.jetty.server.Server:doStart(327): jetty-9.2.3.v20140905
INFO 2017-12-06 17:44:09,321 [main] com.mesosphere.sdk.scheduler.SchedulerRunner:runScheduler(125): Registering framework: user: "root" name: "kubernetes" id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-0006" } failover_timeout: 1209600.0 checkpoint: true role: "kubernetes-role" principal: "kubernetes" capabilities { type: RESERVATION_REFINEMENT }
INFO 2017-12-06 17:44:09,324 [main] com.mesosphere.sdk.scheduler.SchedulerDriverFactory:create(76): Creating sidechannel authenticated MesosSchedulerDriver for scheduler[com.mesosphere.sdk.scheduler.AbstractScheduler$MesosScheduler@2ee522bf], frameworkInfo[user: "root" name: "kubernetes" id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-0006" } failover_timeout: 1209600.0 checkpoint: true role: "kubernetes-role" principal: "kubernetes" capabilities { type: RESERVATION_REFINEMENT }], masterUrl[zk://master.mesos:2181/mesos]
WARN 2017-12-06 17:44:09,345 [main] com.mesosphere.sdk.scheduler.SchedulerDriverFactory:startInternal(111): Current DC/OS cluster doesn't support the Mesos V1 API in strict mode. Using V0...
INFO 2017-12-06 17:44:09,924 [Thread-5] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:connected(103): Connected!
INFO 2017-12-06 17:44:09,924 [Thread-5] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:performReliableSubscription(144): Initializing reliable subscriber
INFO 2017-12-06 17:44:10,031 [Thread-3] org.eclipse.jetty.server.handler.ContextHandler:doStart(744): Started o.e.j.s.ServletContextHandler@4d617016{/,null,AVAILABLE}
INFO 2017-12-06 17:44:10,040 [Thread-3] org.eclipse.jetty.server.ServerConnector:doStart(266): Started ServerConnector@2da5e7ce{HTTP/1.1}{0.0.0.0:15488}
INFO 2017-12-06 17:44:10,041 [Thread-3] org.eclipse.jetty.server.Server:doStart(379): Started @25847ms
INFO 2017-12-06 17:44:10,041 [Thread-3] com.mesosphere.sdk.scheduler.SchedulerApiServer:run(83): API server started at port 15488
ERROR 2017-12-06 17:44:11,262 [qtp636866781-31] com.mesosphere.sdk.api.PlansResource:getPlanManager(390): Plan managers haven't been initialized yet. Unable to retrieve plan deploy
INFO 2017-12-06 17:44:11,928 [pool-10-thread-1] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:subscribe(115): Sending SUBSCRIBE call
INFO 2017-12-06 17:44:12,021 [pool-10-thread-1] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:scheduleNextSubscription(134): Backing off for: 1488
INFO 2017-12-06 17:44:12,053 [Thread-6] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: SUBSCRIBED
INFO 2017-12-06 17:44:12,056 [Thread-6] com.mesosphere.sdk.scheduler.AbstractScheduler:registered(251): Registered framework with frameworkId: 66df60ae-ce98-4ce6-968a-f99922382ef3-0006
INFO 2017-12-06 17:44:12,057 [Thread-6] com.mesosphere.sdk.scheduler.TokenBucket:<init>(49): Configured with count: 256, capacity: 256, incrementInterval: 256s, acquireInterval: 5s
INFO 2017-12-06 17:44:12,059 [Thread-6] com.mesosphere.sdk.scheduler.DefaultScheduler:buildPlanCoordinator(190): Adding overriding recovery plan manager.
INFO 2017-12-06 17:44:12,189 [Thread-6] com.mesosphere.sdk.scheduler.DefaultTaskKiller:killTask(45): Scheduling task kube-node-1-kubelet__90d44342-3fbc-42eb-8d1a-704a1df0c65a to be killed non-destructively
INFO 2017-12-06 17:44:12,239 [Thread-6] com.mesosphere.sdk.scheduler.DefaultTaskKiller:killTask(45): Scheduling task kube-node-1-kube-proxy__d9952501-aa8b-408b-a3db-e7db8195a57e to be killed non-destructively
WARN 2017-12-06 17:44:12,240 [Thread-6] com.mesosphere.sdk.scheduler.DefaultTaskKiller:killTask(41): Attempted to kill empty TaskID.
INFO 2017-12-06 17:44:12,249 [Thread-6] com.mesosphere.sdk.reconciliation.DefaultReconciler:start(60): Added 17 unreconciled tasks to reconciler: 14 tasks to reconcile: [kube-scheduler-2-instance__608d6c56-6941-461a-90c4-bddd197804cb, kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11, etcd-2-peer__80494724-551b-43fa-85e3-6ca4f60938df, kube-controller-manager-0-instance__cf817499-703f-40e2-8933-97258babe1ba, kube-apiserver-1-instance__644e3831-335f-43bb-8775-e782ef40c75f, kube-node-0-kube-proxy__1dfecc56-e98b-4a62-bbfc-22bf587e0e82, kube-scheduler-0-instance__d8d08b78-274f-4a06-b7eb-c2586f10f6a4, kube-scheduler-1-instance__cc6ba12e-be6e-42ef-b944-1a495c3a70af, kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419, kube-apiserver-0-instance__d4419299-e8dd-4e59-821c-fd3deead0554, kube-apiserver-2-instance__7d469297-d3b3-4324-9df8-28c3479dc744, etcd-0-peer__e29bb397-b2a7-4aa4-a5b2-ce51951e2c16, etcd-1-peer__2c9e875c-f09d-4682-961a-0fe37f9bd9ae, kube-node-0-kubelet__327e0783-0f22-4522-9708-af50471a51d6]
INFO 2017-12-06 17:44:12,249 [Thread-6] com.mesosphere.sdk.reconciliation.DefaultReconciler:reconcile(96): Triggering explicit reconciliation of 14 remaining tasks, next explicit reconciliation in 8000ms or later
INFO 2017-12-06 17:44:12,328 [Thread-6] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(205): Subscribed with ID value: "66df60ae-ce98-4ce6-968a-f99922382ef3-0006"
INFO 2017-12-06 17:44:12,329 [Thread-7] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: HEARTBEAT
INFO 2017-12-06 17:44:12,426 [Thread-8] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:12,452 [Thread-8] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419 state=TASK_RUNNING message= protobuf=task_id { value: "kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419" } state: TASK_RUNNING slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582217193632E9 executor_id { value: "kube-controller-manager__8ed0b1ee-4c10-4a2d-8e87-dea57c29a3ef" } healthy: false source: SOURCE_EXECUTOR reason: REASON_TASK_HEALTH_CHECK_STATUS_UPDATED uuid: "t\337\021\315\326\323E\275\223\265#u;\244fJ" container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5274 container_id { value: "a06913c9-e24a-4a5f-9571-cedf4cb68b5b" parent { value: "861bc702-fde8-446d-89f1-23e7f3e7b3a5" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:12,522 [Thread-8] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-2-instance' in 'Tasks/kube-controller-manager-2-instance/TaskStatus'
INFO 2017-12-06 17:44:12,622 [Thread-8] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419 (13 remaining tasks)
INFO 2017-12-06 17:44:12,652 [Thread-9] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: OFFERS
INFO 2017-12-06 17:44:12,654 [Thread-9] com.mesosphere.sdk.scheduler.AbstractScheduler:resourceOffers(293): Enqueuing 3 offers. Updated offers in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838]
INFO 2017-12-06 17:44:12,721 [pool-7-thread-1] com.mesosphere.sdk.reconciliation.DefaultReconciler:reconcile(103): Too soon since last explicit reconciliation trigger. Waiting at least 7528ms before next explicit reconciliation (13 remaining tasks)
INFO 2017-12-06 17:44:12,720 [Thread-10] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:12,721 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(400): Declining 3 offers: Waiting for task reconciliation to complete.
INFO 2017-12-06 17:44:12,721 [Thread-10] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-scheduler-2-instance__608d6c56-6941-461a-90c4-bddd197804cb state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-scheduler-2-instance__608d6c56-6941-461a-90c4-bddd197804cb" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.51258225265063E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5726 container_id { value: "f0140a1a-442e-4c7f-a660-1e1ee8dd1cd9" parent { value: "6cc25e27-5ebc-42b5-b2eb-8b8841bc45b5" } } }
INFO 2017-12-06 17:44:12,722 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:declineOffers(69): Declining 3 unused offers for 5 seconds:
INFO 2017-12-06 17:44:12,723 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:lambda$declineOffers$2(75): 66df60ae-ce98-4ce6-968a-f99922382ef3-O836
INFO 2017-12-06 17:44:12,724 [Thread-10] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-scheduler-2-instance' in 'Tasks/kube-scheduler-2-instance/TaskStatus'
INFO 2017-12-06 17:44:12,743 [Thread-10] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-scheduler-2-instance__608d6c56-6941-461a-90c4-bddd197804cb (12 remaining tasks)
INFO 2017-12-06 17:44:12,748 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:lambda$declineOffers$2(75): 66df60ae-ce98-4ce6-968a-f99922382ef3-O837
INFO 2017-12-06 17:44:12,748 [Thread-11] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:12,748 [Thread-11] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582252650723E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5379 container_id { value: "b545281a-c68f-4433-b422-e56545dbd15e" parent { value: "ca14c9cc-5f24-4956-be43-c4ce7590b434" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:12,751 [Thread-11] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-1-instance' in 'Tasks/kube-controller-manager-1-instance/TaskStatus'
WARN 2017-12-06 17:44:12,826 [qtp636866781-33] com.mesosphere.sdk.scheduler.plan.DeploymentStep:forceComplete(99): Forcing completion of step: 'kube-node-0:[decommission] [d7061397-0e72-4585-bb79-b21a6231757e]'
INFO 2017-12-06 17:44:12,826 [qtp636866781-33] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[decommission]: changed status from: PENDING to: COMPLETE (interrupted=false)
WARN 2017-12-06 17:44:12,826 [qtp636866781-33] com.mesosphere.sdk.scheduler.plan.DeploymentStep:forceComplete(99): Forcing completion of step: 'kube-node-0:[commission] [d68ba1e4-b5d3-4952-b510-fef6d19cbe55]'
INFO 2017-12-06 17:44:12,827 [qtp636866781-33] com.mesosphere.sdk.scheduler.plan.DeploymentStep:setStatus(65): kube-node-0:[commission]: changed status from: PENDING to: COMPLETE (interrupted=false)
INFO 2017-12-06 17:44:12,827 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:12,828 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:12,831 [Thread-11] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11 (11 remaining tasks)
INFO 2017-12-06 17:44:12,832 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:lambda$declineOffers$2(75): 66df60ae-ce98-4ce6-968a-f99922382ef3-O838
INFO 2017-12-06 17:44:12,832 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers...
INFO 2017-12-06 17:44:12,832 [Thread-12] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:12,833 [Thread-12] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=etcd-2-peer__80494724-551b-43fa-85e3-6ca4f60938df state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "etcd-2-peer__80494724-551b-43fa-85e3-6ca4f60938df" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.51258225265076E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 4804 container_id { value: "f2ff97ef-1bbc-4b14-a7d0-f56cc7a3af69" parent { value: "9dbc6f23-d413-4671-8f4c-d19576601315" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:12,834 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:12,840 [Thread-12] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'etcd-2-peer' in 'Tasks/etcd-2-peer/TaskStatus'
INFO 2017-12-06 17:44:12,930 [Thread-12] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: etcd-2-peer__80494724-551b-43fa-85e3-6ca4f60938df (10 remaining tasks)
INFO 2017-12-06 17:44:12,932 [Thread-13] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:12,932 [Thread-13] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-0-instance__cf817499-703f-40e2-8933-97258babe1ba state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-controller-manager-0-instance__cf817499-703f-40e2-8933-97258babe1ba" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.5125822526507862E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 1163 container_id { value: "15b4b621-d8e8-4607-901f-9b6a1132fda3" parent { value: "4d4fefa1-91db-4d43-8f5d-0a6ffd029054" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:12,936 [Thread-13] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-0-instance' in 'Tasks/kube-controller-manager-0-instance/TaskStatus'
INFO 2017-12-06 17:44:12,949 [Thread-13] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-controller-manager-0-instance__cf817499-703f-40e2-8933-97258babe1ba (9 remaining tasks)
INFO 2017-12-06 17:44:12,950 [Thread-14] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:12,950 [Thread-14] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-apiserver-1-instance__644e3831-335f-43bb-8775-e782ef40c75f state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-apiserver-1-instance__644e3831-335f-43bb-8775-e782ef40c75f" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582252650811E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 974 container_id { value: "fd6a54dc-e309-4638-9050-9667ab487650" parent { value: "8bdaf9f8-f468-4f0d-87b0-1a71a30a775f" } } }
INFO 2017-12-06 17:44:12,952 [Thread-14] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-apiserver-1-instance' in 'Tasks/kube-apiserver-1-instance/TaskStatus'
INFO 2017-12-06 17:44:13,029 [Thread-14] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-apiserver-1-instance__644e3831-335f-43bb-8775-e782ef40c75f (8 remaining tasks)
INFO 2017-12-06 17:44:13,030 [Thread-15] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,030 [Thread-15] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-node-0-kube-proxy__1dfecc56-e98b-4a62-bbfc-22bf587e0e82 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-node-0-kube-proxy__1dfecc56-e98b-4a62-bbfc-22bf587e0e82" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582252650846E9 source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 6305 container_id { value: "db872c0d-fd27-479a-9c27-786b1fa0fffb" parent { value: "a2a62cfb-373c-4512-b553-9215eaec5571" } } }
INFO 2017-12-06 17:44:13,032 [Thread-15] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-node-0-kube-proxy' in 'Tasks/kube-node-0-kube-proxy/TaskStatus'
INFO 2017-12-06 17:44:13,042 [Thread-15] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-node-0-kube-proxy__1dfecc56-e98b-4a62-bbfc-22bf587e0e82 (7 remaining tasks)
INFO 2017-12-06 17:44:13,043 [Thread-16] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,043 [Thread-16] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-scheduler-0-instance__d8d08b78-274f-4a06-b7eb-c2586f10f6a4 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-scheduler-0-instance__d8d08b78-274f-4a06-b7eb-c2586f10f6a4" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582252650871E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5873 container_id { value: "dd8522c4-f40a-418f-91b0-db5f51e600d1" parent { value: "6601f913-b6ee-4f23-8a47-ff3476c8060c" } } }
INFO 2017-12-06 17:44:13,045 [Thread-16] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-scheduler-0-instance' in 'Tasks/kube-scheduler-0-instance/TaskStatus'
INFO 2017-12-06 17:44:13,127 [Thread-16] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-scheduler-0-instance__d8d08b78-274f-4a06-b7eb-c2586f10f6a4 (6 remaining tasks)
INFO 2017-12-06 17:44:13,127 [Thread-17] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,128 [Thread-17] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-scheduler-1-instance__cc6ba12e-be6e-42ef-b944-1a495c3a70af state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-scheduler-1-instance__cc6ba12e-be6e-42ef-b944-1a495c3a70af" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582252650906E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 1663 container_id { value: "1a22adb4-c8ae-4e40-a295-836485ffe77f" parent { value: "64606b02-c479-4c00-87d4-4530acba82c1" } } }
INFO 2017-12-06 17:44:13,130 [Thread-17] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-scheduler-1-instance' in 'Tasks/kube-scheduler-1-instance/TaskStatus'
INFO 2017-12-06 17:44:13,140 [Thread-17] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-scheduler-1-instance__cc6ba12e-be6e-42ef-b944-1a495c3a70af (5 remaining tasks)
INFO 2017-12-06 17:44:13,141 [Thread-18] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,141 [Thread-18] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.5125822526509418E9 healthy: false source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5274 container_id { value: "a06913c9-e24a-4a5f-9571-cedf4cb68b5b" parent { value: "861bc702-fde8-446d-89f1-23e7f3e7b3a5" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:13,144 [Thread-18] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-2-instance' in 'Tasks/kube-controller-manager-2-instance/TaskStatus'
INFO 2017-12-06 17:44:13,226 [Thread-18] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419 (5 remaining tasks)
INFO 2017-12-06 17:44:13,227 [Thread-19] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,227 [Thread-19] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-apiserver-0-instance__d4419299-e8dd-4e59-821c-fd3deead0554 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-apiserver-0-instance__d4419299-e8dd-4e59-821c-fd3deead0554" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.51258225265098E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5203 container_id { value: "8d248150-4f40-4b61-9734-c509e3aa4d98" parent { value: "11d019b2-872d-4198-a864-37d3b5cbbd46" } } }
INFO 2017-12-06 17:44:13,229 [Thread-19] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-apiserver-0-instance' in 'Tasks/kube-apiserver-0-instance/TaskStatus'
INFO 2017-12-06 17:44:13,239 [qtp636866781-31] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:13,239 [Thread-19] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-apiserver-0-instance__d4419299-e8dd-4e59-821c-fd3deead0554 (4 remaining tasks)
INFO 2017-12-06 17:44:13,240 [qtp636866781-31] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:13,243 [qtp636866781-31] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:13,320 [Thread-20] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,321 [Thread-20] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-apiserver-2-instance__7d469297-d3b3-4324-9df8-28c3479dc744 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-apiserver-2-instance__7d469297-d3b3-4324-9df8-28c3479dc744" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582252651009E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5100 container_id { value: "c983e770-2baf-4df1-87d1-a07d94306ddc" parent { value: "ab23bcc1-5825-4b45-b3e2-90f098a5e20a" } } }
INFO 2017-12-06 17:44:13,323 [Thread-20] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-apiserver-2-instance' in 'Tasks/kube-apiserver-2-instance/TaskStatus'
INFO 2017-12-06 17:44:13,334 [Thread-20] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-apiserver-2-instance__7d469297-d3b3-4324-9df8-28c3479dc744 (3 remaining tasks)
INFO 2017-12-06 17:44:13,336 [Thread-21] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,336 [Thread-21] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=etcd-0-peer__e29bb397-b2a7-4aa4-a5b2-ce51951e2c16 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "etcd-0-peer__e29bb397-b2a7-4aa4-a5b2-ce51951e2c16" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582252651047E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 4897 container_id { value: "e3d913e0-de2e-4379-a3e7-f3c907b78f70" parent { value: "65b5a11c-c564-46b7-8f02-d87cf1564220" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:13,338 [Thread-21] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'etcd-0-peer' in 'Tasks/etcd-0-peer/TaskStatus'
INFO 2017-12-06 17:44:13,422 [Thread-21] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: etcd-0-peer__e29bb397-b2a7-4aa4-a5b2-ce51951e2c16 (2 remaining tasks)
INFO 2017-12-06 17:44:13,423 [Thread-22] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,423 [Thread-22] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=etcd-1-peer__2c9e875c-f09d-4682-961a-0fe37f9bd9ae state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "etcd-1-peer__2c9e875c-f09d-4682-961a-0fe37f9bd9ae" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.5125822526510808E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 651 container_id { value: "f511f139-cde9-4f7c-9f24-8de846d0dab9" parent { value: "acf28ef7-73f1-45b0-a5aa-aabf1175ec74" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:13,425 [Thread-22] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'etcd-1-peer' in 'Tasks/etcd-1-peer/TaskStatus'
INFO 2017-12-06 17:44:13,438 [Thread-22] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: etcd-1-peer__2c9e875c-f09d-4682-961a-0fe37f9bd9ae (1 remaining tasks)
INFO 2017-12-06 17:44:13,439 [Thread-23] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,439 [Thread-23] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-node-0-kubelet__327e0783-0f22-4522-9708-af50471a51d6 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-node-0-kubelet__327e0783-0f22-4522-9708-af50471a51d6" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.5125822526511118E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 6311 container_id { value: "ba9f0546-2aca-44a0-acc0-80719d641c86" parent { value: "a2a62cfb-373c-4512-b553-9215eaec5571" } } }
INFO 2017-12-06 17:44:13,441 [Thread-23] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-node-0-kubelet' in 'Tasks/kube-node-0-kubelet/TaskStatus'
INFO 2017-12-06 17:44:13,451 [Thread-23] com.mesosphere.sdk.reconciliation.DefaultReconciler:update(128): Reconciled task: kube-node-0-kubelet__327e0783-0f22-4522-9708-af50471a51d6 (0 remaining tasks)
INFO 2017-12-06 17:44:13,452 [Thread-24] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,520 [Thread-24] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419 state=TASK_RUNNING message= protobuf=task_id { value: "kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419" } state: TASK_RUNNING slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582232463029E9 executor_id { value: "kube-controller-manager__8ed0b1ee-4c10-4a2d-8e87-dea57c29a3ef" } healthy: true source: SOURCE_EXECUTOR reason: REASON_TASK_HEALTH_CHECK_STATUS_UPDATED uuid: "d(\370\237\364\205N\365\200\020U\031\310\263~\006" container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5274 container_id { value: "a06913c9-e24a-4a5f-9571-cedf4cb68b5b" parent { value: "861bc702-fde8-446d-89f1-23e7f3e7b3a5" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:13,522 [Thread-24] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-2-instance' in 'Tasks/kube-controller-manager-2-instance/TaskStatus'
INFO 2017-12-06 17:44:13,534 [pool-10-thread-1] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:cancelSubscriber(731): Cancelling subscriber
INFO 2017-12-06 17:44:13,535 [Thread-25] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,535 [Thread-25] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-node-1-kube-proxy__d9952501-aa8b-408b-a3db-e7db8195a57e state=TASK_KILLED message=Command terminated with signal Killed protobuf=task_id { value: "kube-node-1-kube-proxy__d9952501-aa8b-408b-a3db-e7db8195a57e" } state: TASK_KILLED message: "Command terminated with signal Killed" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582252831413E9 executor_id { value: "kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c" } source: SOURCE_EXECUTOR uuid: "%0\372_\215jEN\213\nc\237/\033O\f" container_status { container_id { value: "c00344ce-907c-45a0-b633-ab32a53916ee" parent { value: "4353b892-dcc7-45e6-a31f-e69e5b5e590c" } } }
WARN 2017-12-06 17:44:13,538 [Thread-25] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(331): Failed to update TaskStatus received from Mesos. This may be expected if Mesos sent stale status information: task_id {
value: "kube-node-1-kube-proxy__d9952501-aa8b-408b-a3db-e7db8195a57e"
}
state: TASK_KILLED
message: "Command terminated with signal Killed"
slave_id {
value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2"
}
timestamp: 1.512582252831413E9
executor_id {
value: "kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c"
}
source: SOURCE_EXECUTOR
uuid: "%0\372_\215jEN\213\nc\237/\033O\f"
container_status {
container_id {
value: "c00344ce-907c-45a0-b633-ab32a53916ee"
parent {
value: "4353b892-dcc7-45e6-a31f-e69e5b5e590c"
}
}
}
com.mesosphere.sdk.state.StateStoreException: Failed to find a task with TaskID: task_id {
value: "kube-node-1-kube-proxy__d9952501-aa8b-408b-a3db-e7db8195a57e"
}
state: TASK_KILLED
message: "Command terminated with signal Killed"
slave_id {
value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2"
}
timestamp: 1.512582252831413E9
executor_id {
value: "kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c"
}
source: SOURCE_EXECUTOR
uuid: "%0\372_\215jEN\213\nc\237/\033O\f"
container_status {
container_id {
value: "c00344ce-907c-45a0-b633-ab32a53916ee"
parent {
value: "4353b892-dcc7-45e6-a31f-e69e5b5e590c"
}
}
}
(reason: NOT_FOUND)
at com.mesosphere.sdk.state.StateStoreUtils.getTaskName(StateStoreUtils.java:148) ~[scheduler-master.jar:?]
at com.mesosphere.sdk.scheduler.DefaultScheduler.processStatusUpdate(DefaultScheduler.java:281) ~[kubernetes.jar:?]
at com.mesosphere.sdk.scheduler.AbstractScheduler$MesosScheduler.statusUpdate(AbstractScheduler.java:327) [scheduler-master.jar:?]
at com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter.received(MesosToSchedulerDriverAdapter.java:228) [mesos-http-adapter-0.4.1.jar:?]
INFO 2017-12-06 17:44:13,544 [Thread-26] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,544 [Thread-26] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-node-1-kubelet__90d44342-3fbc-42eb-8d1a-704a1df0c65a state=TASK_KILLED message=Command terminated with signal Terminated protobuf=task_id { value: "kube-node-1-kubelet__90d44342-3fbc-42eb-8d1a-704a1df0c65a" } state: TASK_KILLED message: "Command terminated with signal Terminated" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582252937974E9 executor_id { value: "kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c" } source: SOURCE_EXECUTOR uuid: "t \347\a\034]L\373\212H;B\354\350f\327" container_status { container_id { value: "2d2cc919-395f-46e7-b7da-4e029801fb3d" parent { value: "4353b892-dcc7-45e6-a31f-e69e5b5e590c" } } }
WARN 2017-12-06 17:44:13,622 [Thread-26] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(331): Failed to update TaskStatus received from Mesos. This may be expected if Mesos sent stale status information: task_id {
value: "kube-node-1-kubelet__90d44342-3fbc-42eb-8d1a-704a1df0c65a"
}
state: TASK_KILLED
message: "Command terminated with signal Terminated"
slave_id {
value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2"
}
timestamp: 1.512582252937974E9
executor_id {
value: "kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c"
}
source: SOURCE_EXECUTOR
uuid: "t \347\a\034]L\373\212H;B\354\350f\327"
container_status {
container_id {
value: "2d2cc919-395f-46e7-b7da-4e029801fb3d"
parent {
value: "4353b892-dcc7-45e6-a31f-e69e5b5e590c"
}
}
}
com.mesosphere.sdk.state.StateStoreException: Failed to find a task with TaskID: task_id {
value: "kube-node-1-kubelet__90d44342-3fbc-42eb-8d1a-704a1df0c65a"
}
state: TASK_KILLED
message: "Command terminated with signal Terminated"
slave_id {
value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2"
}
timestamp: 1.512582252937974E9
executor_id {
value: "kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c"
}
source: SOURCE_EXECUTOR
uuid: "t \347\a\034]L\373\212H;B\354\350f\327"
container_status {
container_id {
value: "2d2cc919-395f-46e7-b7da-4e029801fb3d"
parent {
value: "4353b892-dcc7-45e6-a31f-e69e5b5e590c"
}
}
}
(reason: NOT_FOUND)
at com.mesosphere.sdk.state.StateStoreUtils.getTaskName(StateStoreUtils.java:148) ~[scheduler-master.jar:?]
at com.mesosphere.sdk.scheduler.DefaultScheduler.processStatusUpdate(DefaultScheduler.java:281) ~[kubernetes.jar:?]
at com.mesosphere.sdk.scheduler.AbstractScheduler$MesosScheduler.statusUpdate(AbstractScheduler.java:327) [scheduler-master.jar:?]
at com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter.received(MesosToSchedulerDriverAdapter.java:228) [mesos-http-adapter-0.4.1.jar:?]
INFO 2017-12-06 17:44:13,625 [Thread-27] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: FAILURE | |
WARN 2017-12-06 17:44:13,625 [Thread-27] com.mesosphere.sdk.scheduler.AbstractScheduler:executorLost(371): Lost Executor: kube-node__d97d7f0a-f30f-4244-8f56-69e09960bf5c on Agent: 66df60ae-ce98-4ce6-968a-f99922382ef3-S2 | |
INFO 2017-12-06 17:44:13,639 [Thread-28] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: OFFERS | |
INFO 2017-12-06 17:44:13,639 [Thread-28] com.mesosphere.sdk.scheduler.AbstractScheduler:resourceOffers(293): Enqueuing 1 offer. Updated offers in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838, 66df60ae-ce98-4ce6-968a-f99922382ef3-O839] | |
INFO 2017-12-06 17:44:13,639 [pool-7-thread-1] com.mesosphere.sdk.reconciliation.DefaultReconciler:reconcile(110): Triggering implicit final reconciliation of all tasks | |
INFO 2017-12-06 17:44:13,642 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: [] | |
INFO 2017-12-06 17:44:13,642 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:13,642 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:13,642 [Thread-29] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE | |
INFO 2017-12-06 17:44:13,643 [Thread-29] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-node-0-kubelet__327e0783-0f22-4522-9708-af50471a51d6 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-node-0-kubelet__327e0783-0f22-4522-9708-af50471a51d6" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.5125822536359181E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 6311 container_id { value: "ba9f0546-2aca-44a0-acc0-80719d641c86" parent { value: "a2a62cfb-373c-4512-b553-9215eaec5571" } } } | |
INFO 2017-12-06 17:44:13,643 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:13,643 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:13,644 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy | |
INFO 2017-12-06 17:44:13,644 [Thread-29] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-node-0-kubelet' in 'Tasks/kube-node-0-kubelet/TaskStatus' | |
INFO 2017-12-06 17:44:13,644 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:13,645 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:13,645 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:13,645 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:13,720 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: [] | |
INFO 2017-12-06 17:44:13,725 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: [] | |
WARN 2017-12-06 17:44:13,727 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:44:13,727 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus
WARN 2017-12-06 17:44:13,728 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus
INFO 2017-12-06 17:44:13,730 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: []
INFO 2017-12-06 17:44:13,731 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: []
INFO 2017-12-06 17:44:13,732 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: []
INFO 2017-12-06 17:44:13,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: []
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:13,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace
INFO 2017-12-06 17:44:13,737 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: []
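The WARN lines above are the scheduler probing its state store for every task name declared in the service spec; sidecar tasks such as etcd-0-recover or kube-node-1-commission have never run, so nothing exists under Tasks/<name>/TaskStatus and the empty lookup is logged at WARN but treated as expected, not as a failure. A minimal Java sketch of that lookup, assuming a plain in-memory map in place of the SDK's ZooKeeper-backed persister (StatusLookup and its map are illustrative, not the SDK's actual classes):

    import java.util.Map;
    import java.util.Optional;
    import org.apache.mesos.Protos.TaskStatus;

    // Illustrative sketch only -- not the SDK's real StateStore. Statuses are keyed
    // by the path "Tasks/<name>/TaskStatus"; names that never produced a status
    // (the *-recover / *-commission / *-decommission sidecars above) resolve to
    // nothing, which is logged and tolerated.
    class StatusLookup {
        private final Map<String, TaskStatus> store; // stands in for the ZooKeeper persister

        StatusLookup(Map<String, TaskStatus> store) {
            this.store = store;
        }

        Optional<TaskStatus> fetchStatus(String taskName) {
            String path = "Tasks/" + taskName + "/TaskStatus";
            TaskStatus status = store.get(path);
            if (status == null) {
                // Mirrors the WARN lines above: absence is expected for tasks that never ran.
                System.out.printf("WARN No TaskStatus found for the requested name: %s at: %s%n",
                        taskName, path);
                return Optional.empty();
            }
            return Optional.of(status);
        }
    }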
INFO 2017-12-06 17:44:13,820 [Thread-30] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,821 [Thread-30] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-node-0-kube-proxy__1dfecc56-e98b-4a62-bbfc-22bf587e0e82 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-node-0-kube-proxy__1dfecc56-e98b-4a62-bbfc-22bf587e0e82" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582253636089E9 source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 6305 container_id { value: "db872c0d-fd27-479a-9c27-786b1fa0fffb" parent { value: "a2a62cfb-373c-4512-b553-9215eaec5571" } } }
INFO 2017-12-06 17:44:13,821 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: []
INFO 2017-12-06 17:44:13,822 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[]
INFO 2017-12-06 17:44:13,822 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 1 offer against 0 steps:
INFO 2017-12-06 17:44:13,822 [Thread-30] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-node-0-kube-proxy' in 'Tasks/kube-node-0-kube-proxy/TaskStatus'
INFO 2017-12-06 17:44:13,834 [Thread-31] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,835 [Thread-31] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-scheduler-1-instance__cc6ba12e-be6e-42ef-b944-1a495c3a70af state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-scheduler-1-instance__cc6ba12e-be6e-42ef-b944-1a495c3a70af" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582253636112E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 1663 container_id { value: "1a22adb4-c8ae-4e40-a295-836485ffe77f" parent { value: "64606b02-c479-4c00-87d4-4530acba82c1" } } }
INFO 2017-12-06 17:44:13,837 [Thread-31] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-scheduler-1-instance' in 'Tasks/kube-scheduler-1-instance/TaskStatus'
INFO 2017-12-06 17:44:13,927 [Thread-32] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,927 [Thread-32] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-scheduler-2-instance__608d6c56-6941-461a-90c4-bddd197804cb state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-scheduler-2-instance__608d6c56-6941-461a-90c4-bddd197804cb" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582253636128E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5726 container_id { value: "f0140a1a-442e-4c7f-a660-1e1ee8dd1cd9" parent { value: "6cc25e27-5ebc-42b5-b2eb-8b8841bc45b5" } } }
INFO 2017-12-06 17:44:13,929 [Thread-32] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-scheduler-2-instance' in 'Tasks/kube-scheduler-2-instance/TaskStatus'
INFO 2017-12-06 17:44:13,940 [Thread-33] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:13,940 [Thread-33] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=etcd-2-peer__80494724-551b-43fa-85e3-6ca4f60938df state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "etcd-2-peer__80494724-551b-43fa-85e3-6ca4f60938df" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582253636149E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 4804 container_id { value: "f2ff97ef-1bbc-4b14-a7d0-f56cc7a3af69" parent { value: "9dbc6f23-d413-4671-8f4c-d19576601315" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:14,021 [Thread-33] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'etcd-2-peer' in 'Tasks/etcd-2-peer/TaskStatus'
INFO 2017-12-06 17:44:14,027 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(424): 1: id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-O839" } framework_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-0006" } slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } hostname: "10.138.0.4" resources { name: "ports" type: RANGES ranges { range { begin: 1025 end: 2180 } range { begin: 2182 end: 2378 } range { begin: 2381 end: 3887 } range { begin: 3889 end: 5049 } range { begin: 5052 end: 6442 } range { begin: 6444 end: 8079 } range { begin: 8082 end: 8180 } range { begin: 8182 end: 8999 } range { begin: 9001 end: 32000 } } allocation_info { role: "kubernetes-role" } } resources { name: "disk" type: SCALAR scalar { value: 29055.0 } allocation_info { role: "kubernetes-role" } } resources { name: "cpus" type: SCALAR scalar { value: 2.1 } allocation_info { role: "kubernetes-role" } } resources { name: "mem" type: SCALAR scalar { value: 21199.0 } allocation_info { role: "kubernetes-role" } } resources { name: "mem" type: SCALAR scalar { value: 4100.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "ec6f745e-235b-4fbc-96b0-54964523a5c3" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 3.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "3223fdf1-636a-4af0-a2ac-8de797e2e54e" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "43c79bc9-ab10-4ce8-9b07-218b43e06f05" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "fb158714-f4a9-4297-9cb1-2f8adf694c36" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "c4b24205-811f-4abd-b2a8-fbfdcb6aa2a0" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "a6c5eb4d-cf27-4abd-a321-2c09e70983d9" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "disk" type: SCALAR scalar { value: 256.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "b9d8e71e-e8dd-4f9a-a18e-e305c7dd1b05" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "23708e59-79c3-4b82-9f72-a49d8a0ece6b" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "0b28c1c5-60f3-4d39-9af3-1dc0648eb0f2" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 
} allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "91ef5455-ae5b-43e5-ae5a-75dcb5835113" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "ad656fed-70e1-4ca5-9979-967c319b73fe" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "disk" type: SCALAR scalar { value: 1024.0 } disk { persistence { id: "42d6e9ad-ed01-44c3-a1e7-ba324ed2a729" principal: "kubernetes" } volume { container_path: "var" mode: RW } } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "b1e8146b-ca3d-4637-adf1-3a36d0767bf0" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "60d9d1f7-16c1-4ff4-90c9-b1d10ba716b6" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 512.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "fe3c1a26-0912-4e88-8978-2fedeb04fcfb" } } role: "kubernetes-role" type: DYNAMIC } } executor_ids { value: "kube-scheduler__b34a7a05-3499-4f90-986e-e8ed055e90ab" } executor_ids { value: "kube-controller-manager__f54d9622-647e-45e9-bfd6-a9ec36182e60" } executor_ids { value: "kube-apiserver__31e2cf2a-936f-4091-ac67-db546a367c66" } executor_ids { value: "etcd__cc9cc502-353b-4068-8046-e3dfc74bd1be" } url { scheme: "http" address { hostname: "10.138.0.4" ip: "10.138.0.4" port: 5051 } path: "/slave(1)" } allocation_info { role: "kubernetes-role" }
INFO 2017-12-06 17:44:14,033 [Thread-34] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,033 [Thread-34] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=etcd-0-peer__e29bb397-b2a7-4aa4-a5b2-ce51951e2c16 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "etcd-0-peer__e29bb397-b2a7-4aa4-a5b2-ce51951e2c16" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582253636173E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 4897 container_id { value: "e3d913e0-de2e-4379-a3e7-f3c907b78f70" parent { value: "65b5a11c-c564-46b7-8f02-d87cf1564220" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:14,036 [Thread-34] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'etcd-0-peer' in 'Tasks/etcd-0-peer/TaskStatus'
INFO 2017-12-06 17:44:14,124 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:14,125 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:14,127 [Thread-35] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,127 [Thread-35] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-controller-manager-2-instance__8ab2ef51-d08f-4838-9246-ad642bb9a419" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582253636191E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5274 container_id { value: "a06913c9-e24a-4a5f-9571-cedf4cb68b5b" parent { value: "861bc702-fde8-446d-89f1-23e7f3e7b3a5" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:14,129 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:14,129 [Thread-35] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-2-instance' in 'Tasks/kube-controller-manager-2-instance/TaskStatus'
INFO 2017-12-06 17:44:14,133 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:declineOffers(69): Declining 1 unused offers for 1209600 seconds:
INFO 2017-12-06 17:44:14,133 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:lambda$declineOffers$2(75): 66df60ae-ce98-4ce6-968a-f99922382ef3-O839
INFO 2017-12-06 17:44:14,144 [Thread-36] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,144 [Thread-36] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-scheduler-0-instance__d8d08b78-274f-4a06-b7eb-c2586f10f6a4 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-scheduler-0-instance__d8d08b78-274f-4a06-b7eb-c2586f10f6a4" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582253636207E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5873 container_id { value: "dd8522c4-f40a-418f-91b0-db5f51e600d1" parent { value: "6601f913-b6ee-4f23-8a47-ff3476c8060c" } } }
INFO 2017-12-06 17:44:14,145 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(263): 1 Offer processed:
0 accepted by Plans: []
0 accepted by Resource Cleaner: []
1 declined: [66df60ae-ce98-4ce6-968a-f99922382ef3-O839]
INFO 2017-12-06 17:44:14,221 [Thread-36] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-scheduler-0-instance' in 'Tasks/kube-scheduler-0-instance/TaskStatus'
INFO 2017-12-06 17:44:14,222 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 1 queued offer. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838]
INFO 2017-12-06 17:44:14,222 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers...
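With no candidate steps and nothing claimed by the resource cleaner, offer O839 is declined for 1209600 seconds (14 days). That is the long-decline pattern: resources the framework does not currently need are refused for a long window and recovered later with an explicit revive, instead of being re-offered every allocation cycle. A sketch of such a decline against the classic Mesos Java driver API (OfferDecliner and declineUnused are illustrative names, not the SDK's):

    import org.apache.mesos.Protos;
    import org.apache.mesos.SchedulerDriver;

    // Decline an unused offer with a long refuse window so the master stops
    // re-offering these resources until the framework revives offers again.
    class OfferDecliner {
        static final double REFUSE_SECONDS = 1209600; // 14 days, matching the log above

        static void declineUnused(SchedulerDriver driver, Protos.OfferID offerId) {
            Protos.Filters filters = Protos.Filters.newBuilder()
                    .setRefuseSeconds(REFUSE_SECONDS)
                    .build();
            driver.declineOffer(offerId, filters);
        }
    }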
INFO 2017-12-06 17:44:14,234 [Thread-37] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,234 [Thread-37] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-apiserver-1-instance__644e3831-335f-43bb-8775-e782ef40c75f state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-apiserver-1-instance__644e3831-335f-43bb-8775-e782ef40c75f" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582253636223E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 974 container_id { value: "fd6a54dc-e309-4638-9050-9667ab487650" parent { value: "8bdaf9f8-f468-4f0d-87b0-1a71a30a775f" } } }
INFO 2017-12-06 17:44:14,236 [Thread-37] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-apiserver-1-instance' in 'Tasks/kube-apiserver-1-instance/TaskStatus'
INFO 2017-12-06 17:44:14,248 [Thread-38] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,248 [Thread-38] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-apiserver-0-instance__d4419299-e8dd-4e59-821c-fd3deead0554 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-apiserver-0-instance__d4419299-e8dd-4e59-821c-fd3deead0554" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582253636257E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5203 container_id { value: "8d248150-4f40-4b61-9734-c509e3aa4d98" parent { value: "11d019b2-872d-4198-a864-37d3b5cbbd46" } } }
INFO 2017-12-06 17:44:14,321 [Thread-38] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-apiserver-0-instance' in 'Tasks/kube-apiserver-0-instance/TaskStatus'
INFO 2017-12-06 17:44:14,333 [Thread-39] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,334 [Thread-39] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-apiserver-2-instance__7d469297-d3b3-4324-9df8-28c3479dc744 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-apiserver-2-instance__7d469297-d3b3-4324-9df8-28c3479dc744" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } timestamp: 1.512582253636275E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.5" } } executor_pid: 5100 container_id { value: "c983e770-2baf-4df1-87d1-a07d94306ddc" parent { value: "ab23bcc1-5825-4b45-b3e2-90f098a5e20a" } } }
INFO 2017-12-06 17:44:14,335 [Thread-39] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-apiserver-2-instance' in 'Tasks/kube-apiserver-2-instance/TaskStatus'
INFO 2017-12-06 17:44:14,434 [Thread-40] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,435 [Thread-40] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=etcd-1-peer__2c9e875c-f09d-4682-961a-0fe37f9bd9ae state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "etcd-1-peer__2c9e875c-f09d-4682-961a-0fe37f9bd9ae" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582253636291E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 651 container_id { value: "f511f139-cde9-4f7c-9f24-8de846d0dab9" parent { value: "acf28ef7-73f1-45b0-a5aa-aabf1175ec74" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:14,436 [Thread-40] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'etcd-1-peer' in 'Tasks/etcd-1-peer/TaskStatus'
INFO 2017-12-06 17:44:14,527 [Thread-41] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,528 [Thread-41] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11 state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582253636309E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5379 container_id { value: "b545281a-c68f-4433-b422-e56545dbd15e" parent { value: "ca14c9cc-5f24-4956-be43-c4ce7590b434" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:14,529 [Thread-41] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-1-instance' in 'Tasks/kube-controller-manager-1-instance/TaskStatus'
INFO 2017-12-06 17:44:14,541 [Thread-42] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:14,541 [Thread-42] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-0-instance__cf817499-703f-40e2-8933-97258babe1ba state=TASK_RUNNING message=Reconciliation: Latest task state protobuf=task_id { value: "kube-controller-manager-0-instance__cf817499-703f-40e2-8933-97258babe1ba" } state: TASK_RUNNING message: "Reconciliation: Latest task state" slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S2" } timestamp: 1.512582253636327E9 healthy: true source: SOURCE_MASTER reason: REASON_RECONCILIATION container_status { network_infos { ip_addresses { ip_address: "10.138.0.4" } } executor_pid: 1163 container_id { value: "15b4b621-d8e8-4607-901f-9b6a1132fda3" parent { value: "4d4fefa1-91db-4d43-8f5d-0a6ffd029054" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:14,543 [Thread-42] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-0-instance' in 'Tasks/kube-controller-manager-0-instance/TaskStatus'
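Every TASK_RUNNING update in the burst above carries source SOURCE_MASTER and reason REASON_RECONCILIATION: these are the master's answers to a task reconciliation request, replaying the latest known state of each task rather than reporting fresh transitions, and the scheduler simply re-stores each status. With the classic Java driver, reconciliation is a single call; a sketch (Reconciler is an illustrative wrapper, and knownStatuses would be whatever the scheduler last persisted):

    import java.util.Collection;
    import java.util.Collections;
    import org.apache.mesos.Protos.TaskStatus;
    import org.apache.mesos.SchedulerDriver;

    // Explicit reconciliation: ask the master for the latest state of the given
    // tasks. The master replies with status updates flagged REASON_RECONCILIATION,
    // which the scheduler then persists, as the storeStatus lines above show.
    class Reconciler {
        static void reconcile(SchedulerDriver driver, Collection<TaskStatus> knownStatuses) {
            driver.reconcileTasks(knownStatuses);
        }

        // Passing an empty collection requests implicit reconciliation of all tasks.
        static void reconcileAll(SchedulerDriver driver) {
            driver.reconcileTasks(Collections.<TaskStatus>emptyList());
        }
    }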
INFO 2017-12-06 17:44:15,032 [qtp636866781-32] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:15,033 [qtp636866781-32] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:15,034 [qtp636866781-32] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:15,814 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:15,814 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:15,816 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:18,650 [Thread-43] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: OFFERS
INFO 2017-12-06 17:44:18,650 [Thread-43] com.mesosphere.sdk.scheduler.AbstractScheduler:resourceOffers(293): Enqueuing 2 offers. Updated offers in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O840, 66df60ae-ce98-4ce6-968a-f99922382ef3-O841, 66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838]
INFO 2017-12-06 17:44:18,651 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: []
INFO 2017-12-06 17:44:18,651 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:18,651 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:18,651 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:18,651 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:18,651 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy
INFO 2017-12-06 17:44:18,652 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:18,652 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:18,652 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:18,652 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:18,652 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: []
INFO 2017-12-06 17:44:18,655 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: []
WARN 2017-12-06 17:44:18,657 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus
WARN 2017-12-06 17:44:18,657 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
WARN 2017-12-06 17:44:18,657 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
WARN 2017-12-06 17:44:18,657 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
WARN 2017-12-06 17:44:18,657 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
WARN 2017-12-06 17:44:18,657 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
WARN 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
WARN 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus
WARN 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus
INFO 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: []
INFO 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: []
INFO 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: []
INFO 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: []
INFO 2017-12-06 17:44:18,658 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: []
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: []
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[]
INFO 2017-12-06 17:44:18,659 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 2 offers against 0 steps:
INFO 2017-12-06 17:44:18,663 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(424): 1: id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-O840" } framework_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-0006" } slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } hostname: "10.138.0.6" resources { name: "ports" type: RANGES ranges { range { begin: 1025 end: 2180 } range { begin: 2182 end: 2378 } range { begin: 2381 end: 3887 } range { begin: 3889 end: 5049 } range { begin: 5052 end: 6442 } range { begin: 6444 end: 8079 } range { begin: 8082 end: 8180 } range { begin: 8182 end: 8999 } range { begin: 9001 end: 32000 } } allocation_info { role: "kubernetes-role" } } resources { name: "disk" type: SCALAR scalar { value: 28799.0 } allocation_info { role: "kubernetes-role" } } resources { name: "cpus" type: SCALAR scalar { value: 1.9 } allocation_info { role: "kubernetes-role" } } resources { name: "mem" type: SCALAR scalar { value: 21135.0 } allocation_info { role: "kubernetes-role" } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "f6f6e585-1f39-46fd-b448-038c2936f0cf" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "4693d3dc-7752-4b3d-9124-dd29fb506f80" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "32a62b6a-41a3-464a-b254-ccacb576056a" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "9d436956-fea9-48ae-a17e-accb4e392c6e" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "c1ab6b00-21bc-4252-8978-0b5d7357fe9f" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "disk" type: SCALAR scalar { value: 256.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "c5d1148a-f2e1-4b17-8322-d48c267fc41c" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "bec5db3f-3951-4d14-aa72-d73d67b4fc5f" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "24cbbeb4-bb55-44bd-9efd-7494f1447996" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "06056195-c3d0-42ca-b858-5953b66f18d8" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } 
allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "34af1c89-0201-4ddf-81d7-6b21ab8752f1" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "c1031de5-9962-463c-a352-abb2cbf6c64a" } } role: "kubernetes-role" type: DYNAMIC } } executor_ids { value: "kube-node__f0f3e687-8a63-4742-a4bb-67ed3d8a8e38" } executor_ids { value: "kube-controller-manager__62e74d4c-7552-443a-82ae-fb4491eb2f82" } executor_ids { value: "kube-apiserver__152f772c-3645-42e3-b675-6704200d0c7a" } executor_ids { value: "kube-scheduler__122254a0-fb1e-49c3-be10-a865c1aeaf91" } executor_ids { value: "etcd__0a21c1cd-6b71-4372-b0bb-b2e652541a56" } url { scheme: "http" address { hostname: "10.138.0.6" ip: "10.138.0.6" port: 5051 } path: "/slave(1)" } allocation_info { role: "kubernetes-role" }
INFO 2017-12-06 17:44:18,664 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(424): 2: id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-O841" } framework_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-0006" } slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S0" } hostname: "10.138.0.5" resources { name: "ports" type: RANGES ranges { range { begin: 1025 end: 2180 } range { begin: 2182 end: 2378 } range { begin: 2381 end: 3887 } range { begin: 3889 end: 5049 } range { begin: 5052 end: 6442 } range { begin: 6444 end: 8079 } range { begin: 8082 end: 8180 } range { begin: 8182 end: 8999 } range { begin: 9001 end: 15487 } range { begin: 15489 end: 32000 } } allocation_info { role: "kubernetes-role" } } resources { name: "disk" type: SCALAR scalar { value: 30335.0 } allocation_info { role: "kubernetes-role" } } resources { name: "cpus" type: SCALAR scalar { value: 4.5 } allocation_info { role: "kubernetes-role" } } resources { name: "mem" type: SCALAR scalar { value: 24883.0 } allocation_info { role: "kubernetes-role" } } resources { name: "cpus" type: SCALAR scalar { value: 0.1 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "7e33fe21-02f2-4bc1-bff7-ffc30a3a39c8" } } role: "kubernetes-role" type: DYNAMIC } } resources { name: "mem" type: SCALAR scalar { value: 32.0 } allocation_info { role: "kubernetes-role" } reservations { principal: "kubernetes" labels { labels { key: "resource_id" value: "981bc60a-b95c-4a75-bedf-32121c694eb8" } } role: "kubernetes-role" type: DYNAMIC } } executor_ids { value: "kube-scheduler__b14c87b7-981f-409b-a320-b469905c708c" } executor_ids { value: "kube-controller-manager__8ed0b1ee-4c10-4a2d-8e87-dea57c29a3ef" } executor_ids { value: "kube-apiserver__a4d34e86-2fb1-4f58-bd53-d13ff41505c9" } executor_ids { value: "etcd__1d55b84a-fafd-42e8-a339-ac63c647069d" } url { scheme: "http" address { hostname: "10.138.0.5" ip: "10.138.0.5" port: 5051 } path: "/slave(1)" } allocation_info { role: "kubernetes-role" }
INFO 2017-12-06 17:44:18,667 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:declineOffers(69): Declining 2 unused offers for 1209600 seconds:
INFO 2017-12-06 17:44:18,667 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:lambda$declineOffers$2(75): 66df60ae-ce98-4ce6-968a-f99922382ef3-O840
INFO 2017-12-06 17:44:18,667 [pool-7-thread-1] com.mesosphere.sdk.offer.OfferUtils:lambda$declineOffers$2(75): 66df60ae-ce98-4ce6-968a-f99922382ef3-O841
INFO 2017-12-06 17:44:18,668 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(263): 2 Offers processed:
0 accepted by Plans: []
0 accepted by Resource Cleaner: []
2 declined: [66df60ae-ce98-4ce6-968a-f99922382ef3-O840, 66df60ae-ce98-4ce6-968a-f99922382ef3-O841]
INFO 2017-12-06 17:44:18,668 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 2 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838]
INFO 2017-12-06 17:44:18,668 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers...
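The recurring 'Candidates, old: [], current: [], new:[]' line is the revive check that pairs with the two-week declines above: if a pass ever produces a candidate step that the previous pass did not have, the scheduler must revive offers, or the long refuse filters would starve it of resources. A sketch of that guard, assuming plain string identifiers for candidates (ReviveCheck and maybeRevive are illustrative names, not the SDK's ReviveManager internals):

    import java.util.HashSet;
    import java.util.Set;
    import org.apache.mesos.SchedulerDriver;

    // If any candidate step appears that was absent on the previous pass, revive
    // offers so the master resumes sending resources despite earlier long declines.
    class ReviveCheck {
        private Set<String> previous = new HashSet<>();

        void maybeRevive(SchedulerDriver driver, Set<String> current) {
            Set<String> fresh = new HashSet<>(current);
            fresh.removeAll(previous);      // the "new" set in the log line above
            if (!fresh.isEmpty()) {
                driver.reviveOffers();      // clears previously installed decline filters
            }
            previous = current;
        }
    }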
INFO 2017-12-06 17:44:23,668 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: []
INFO 2017-12-06 17:44:23,669 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:23,669 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:23,669 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:23,669 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:23,670 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy
INFO 2017-12-06 17:44:23,670 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:23,670 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:23,670 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:23,670 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:23,670 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: []
INFO 2017-12-06 17:44:23,721 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: []
WARN 2017-12-06 17:44:23,723 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus
WARN 2017-12-06 17:44:23,723 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
WARN 2017-12-06 17:44:23,724 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
WARN 2017-12-06 17:44:23,725 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
WARN 2017-12-06 17:44:23,725 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
WARN 2017-12-06 17:44:23,725 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
WARN 2017-12-06 17:44:23,725 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
WARN 2017-12-06 17:44:23,725 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus
WARN 2017-12-06 17:44:23,726 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus
INFO 2017-12-06 17:44:23,726 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: []
INFO 2017-12-06 17:44:23,726 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: []
INFO 2017-12-06 17:44:23,726 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: []
INFO 2017-12-06 17:44:23,726 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: []
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:23,727 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace
INFO 2017-12-06 17:44:23,728 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: []
INFO 2017-12-06 17:44:23,728 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: []
INFO 2017-12-06 17:44:23,728 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[]
INFO 2017-12-06 17:44:23,728 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps:
INFO 2017-12-06 17:44:23,731 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed.
INFO 2017-12-06 17:44:23,732 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838]
INFO 2017-12-06 17:44:23,732 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers...
INFO 2017-12-06 17:44:27,022 [Thread-44] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: HEARTBEAT
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:28,733 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:28,734 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: []
INFO 2017-12-06 17:44:28,736 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: []
WARN 2017-12-06 17:44:28,737 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus
WARN 2017-12-06 17:44:28,737 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
WARN 2017-12-06 17:44:28,737 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
WARN 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
WARN 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
WARN 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
WARN 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
WARN 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus
WARN 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus
INFO 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: []
INFO 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: []
INFO 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: []
INFO 2017-12-06 17:44:28,738 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: []
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[]
INFO 2017-12-06 17:44:28,739 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps:
INFO 2017-12-06 17:44:28,741 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed.
INFO 2017-12-06 17:44:28,741 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838]
INFO 2017-12-06 17:44:28,741 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers...
INFO 2017-12-06 17:44:33,226 [Thread-45] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE
INFO 2017-12-06 17:44:33,226 [Thread-45] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11 state=TASK_RUNNING message= protobuf=task_id { value: "kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11" } state: TASK_RUNNING slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582273178442E9 executor_id { value: "kube-controller-manager__62e74d4c-7552-443a-82ae-fb4491eb2f82" } healthy: false source: SOURCE_EXECUTOR reason: REASON_TASK_HEALTH_CHECK_STATUS_UPDATED uuid: "\307\v\034\323\207\373Hm\206|\333\232O\246F7" container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5379 container_id { value: "b545281a-c68f-4433-b422-e56545dbd15e" parent { value: "ca14c9cc-5f24-4956-be43-c4ce7590b434" } } } check_status { type: COMMAND command { exit_code: 0 } }
INFO 2017-12-06 17:44:33,227 [Thread-45] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-1-instance' in 'Tasks/kube-controller-manager-1-instance/TaskStatus'
INFO 2017-12-06 17:44:33,742 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: []
INFO 2017-12-06 17:44:33,742 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:33,742 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:33,742 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:33,742 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, []
INFO 2017-12-06 17:44:33,743 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy
INFO 2017-12-06 17:44:33,743 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:33,743 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:33,743 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:33,743 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:33,743 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: []
INFO 2017-12-06 17:44:33,745 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: []
WARN 2017-12-06 17:44:33,746 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus
WARN 2017-12-06 17:44:33,747 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: []
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: [].
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission | |
INFO 2017-12-06 17:44:33,748 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:33,749 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace | |
INFO 2017-12-06 17:44:33,749 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: [] | |
INFO 2017-12-06 17:44:33,749 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: [] | |
INFO 2017-12-06 17:44:33,749 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[] | |
INFO 2017-12-06 17:44:33,749 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps: | |
INFO 2017-12-06 17:44:33,751 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed. | |
INFO 2017-12-06 17:44:33,751 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838] | |
INFO 2017-12-06 17:44:33,751 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers... | |
INFO 2017-12-06 17:44:38,751 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: [] | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:38,752 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:38,753 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:38,753 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: [] | |
INFO 2017-12-06 17:44:38,755 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: [] | |
WARN 2017-12-06 17:44:38,756 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:44:38,756 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
WARN 2017-12-06 17:44:38,756 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
WARN 2017-12-06 17:44:38,756 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
WARN 2017-12-06 17:44:38,756 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
WARN 2017-12-06 17:44:38,756 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
WARN 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
WARN 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus | |
WARN 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus | |
INFO 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: [] | |
INFO 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: [] | |
INFO 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: [] | |
INFO 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: [] | |
INFO 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery | |
INFO 2017-12-06 17:44:38,757 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: [] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: [] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[] | |
INFO 2017-12-06 17:44:38,758 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps: | |
INFO 2017-12-06 17:44:38,760 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed. | |
INFO 2017-12-06 17:44:38,760 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838] | |
INFO 2017-12-06 17:44:38,760 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers... | |
INFO 2017-12-06 17:44:42,023 [Thread-46] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: HEARTBEAT | |
INFO 2017-12-06 17:44:42,835 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:42,835 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:42,837 [qtp636866781-33] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:43,761 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: [] | |
INFO 2017-12-06 17:44:43,761 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:43,761 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:43,761 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:43,761 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:43,762 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy | |
INFO 2017-12-06 17:44:43,762 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:43,762 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:43,762 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:43,762 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:43,762 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: [] | |
INFO 2017-12-06 17:44:43,764 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: [] | |
WARN 2017-12-06 17:44:43,765 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:44:43,765 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
WARN 2017-12-06 17:44:43,765 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
WARN 2017-12-06 17:44:43,765 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
WARN 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
WARN 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
WARN 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
WARN 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus | |
WARN 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus | |
INFO 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: [] | |
INFO 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: [] | |
INFO 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: [] | |
INFO 2017-12-06 17:44:43,766 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: [] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[] | |
INFO 2017-12-06 17:44:43,767 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps: | |
INFO 2017-12-06 17:44:43,821 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed. | |
INFO 2017-12-06 17:44:43,822 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838] | |
INFO 2017-12-06 17:44:43,822 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers... | |
INFO 2017-12-06 17:44:48,511 [Thread-47] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: UPDATE | |
INFO 2017-12-06 17:44:48,511 [Thread-47] com.mesosphere.sdk.scheduler.AbstractScheduler:statusUpdate(321): Received status update for taskId=kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11 state=TASK_RUNNING message= protobuf=task_id { value: "kube-controller-manager-1-instance__2511d189-757d-417d-a34c-b25e5e4b1e11" } state: TASK_RUNNING slave_id { value: "66df60ae-ce98-4ce6-968a-f99922382ef3-S3" } timestamp: 1.512582288462801E9 executor_id { value: "kube-controller-manager__62e74d4c-7552-443a-82ae-fb4491eb2f82" } healthy: true source: SOURCE_EXECUTOR reason: REASON_TASK_HEALTH_CHECK_STATUS_UPDATED uuid: "\301\320\377G\352\202I\224\272\002\006\004\363\274\273}" container_status { network_infos { ip_addresses { ip_address: "10.138.0.6" } } executor_pid: 5379 container_id { value: "b545281a-c68f-4433-b422-e56545dbd15e" parent { value: "ca14c9cc-5f24-4956-be43-c4ce7590b434" } } } check_status { type: COMMAND command { exit_code: 0 } } | |
INFO 2017-12-06 17:44:48,513 [Thread-47] com.mesosphere.sdk.state.StateStore:storeStatus(192): Storing status 'TASK_RUNNING' for 'kube-controller-manager-1-instance' in 'Tasks/kube-controller-manager-1-instance/TaskStatus' | |
INFO 2017-12-06 17:44:48,822 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: [] | |
INFO 2017-12-06 17:44:48,823 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:48,823 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:48,823 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:48,823 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:48,823 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy | |
INFO 2017-12-06 17:44:48,823 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:48,824 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:48,824 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:48,824 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:48,824 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: [] | |
INFO 2017-12-06 17:44:48,825 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: [] | |
WARN 2017-12-06 17:44:48,826 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:44:48,826 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus | |
WARN 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus | |
INFO 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: [] | |
INFO 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: [] | |
INFO 2017-12-06 17:44:48,827 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: [] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[] | |
INFO 2017-12-06 17:44:48,828 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps: | |
INFO 2017-12-06 17:44:48,830 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed. | |
INFO 2017-12-06 17:44:48,830 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838] | |
INFO 2017-12-06 17:44:48,920 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers... | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: [] | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:53,921 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:53,922 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:53,922 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:53,922 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: [] | |
INFO 2017-12-06 17:44:53,923 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: [] | |
WARN 2017-12-06 17:44:53,924 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:44:53,924 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
WARN 2017-12-06 17:44:53,924 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
WARN 2017-12-06 17:44:53,924 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
WARN 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
WARN 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
WARN 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
WARN 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus | |
WARN 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus | |
INFO 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: [] | |
INFO 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: [] | |
INFO 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: [] | |
INFO 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: [] | |
INFO 2017-12-06 17:44:53,925 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: [] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: [] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[] | |
INFO 2017-12-06 17:44:53,926 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps: | |
INFO 2017-12-06 17:44:53,927 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed. | |
INFO 2017-12-06 17:44:53,928 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838] | |
INFO 2017-12-06 17:44:53,928 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers... | |
INFO 2017-12-06 17:44:57,025 [Thread-48] com.mesosphere.mesos.HTTPAdapter.MesosToSchedulerDriverAdapter:received(181): Received event of type: HEARTBEAT | |
INFO 2017-12-06 17:44:58,928 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(44): Initial dirtied assets: [] | |
INFO 2017-12-06 17:44:58,928 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:58,928 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:58,928 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'deploy' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.kubernetes.scheduler.plan.KubernetesUpgradeStrategy:getCandidates(49): Selected Kubernetes upgrade candidates, [] | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: deploy | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'recovery' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:58,929 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:updatePlan(115): Dirty assets for recovery plan consideration: [] | |
INFO 2017-12-06 17:44:58,931 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(186): Found tasks needing recovery: [] | |
WARN 2017-12-06 17:44:58,932 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-0-recover at: Tasks/etcd-0-recover/TaskStatus | |
WARN 2017-12-06 17:44:58,932 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-1-recover at: Tasks/etcd-1-recover/TaskStatus | |
WARN 2017-12-06 17:44:59,020 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: etcd-2-recover at: Tasks/etcd-2-recover/TaskStatus | |
WARN 2017-12-06 17:44:59,020 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-commission at: Tasks/kube-node-0-commission/TaskStatus | |
WARN 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-0-decommission at: Tasks/kube-node-0-decommission/TaskStatus | |
WARN 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-commission at: Tasks/kube-node-1-commission/TaskStatus | |
WARN 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-decommission at: Tasks/kube-node-1-decommission/TaskStatus | |
WARN 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kube-proxy at: Tasks/kube-node-1-kube-proxy/TaskStatus | |
WARN 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.state.StateStore:fetchStatus(349): No TaskStatus found for the requested name: kube-node-1-kubelet at: Tasks/kube-node-1-kubelet/TaskStatus | |
INFO 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(202): Found pods needing recovery: [] | |
INFO 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(211): Found recoveries already in progress: [] | |
INFO 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:getRecoveryRequirements(216): New pods needing recovery: [] | |
INFO 2017-12-06 17:44:59,021 [pool-7-thread-1] com.mesosphere.sdk.scheduler.recovery.DefaultRecoveryPlanManager:setPlan(82): Recovery plan set to: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: recovery | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(91): Input dirty assets: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getRelevantDirtyAssets(92): Plan's dirty assets: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(56): Getting candidates for plan: 'decommission' with relevant dirtied assets: []. | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(61): Got candidates: [], from plan: decommission | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(72): Updated dirtied assets: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(49): Skipping interrupted plan: replace | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.plan.DefaultPlanCoordinator:getCandidates(78): Got total candidates: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(412): InProgress Steps: [] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.ReviveManager:revive(74): Candidates, old: [], current: [], new:[] | |
INFO 2017-12-06 17:44:59,022 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(420): Processing 0 offers against 0 steps: | |
INFO 2017-12-06 17:44:59,024 [pool-7-thread-1] com.mesosphere.sdk.scheduler.DefaultScheduler:processOffers(261): 0 Offers processed. | |
INFO 2017-12-06 17:44:59,024 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(442): Processed 0 queued offers. 3 offers remain in progress: [66df60ae-ce98-4ce6-968a-f99922382ef3-O836, 66df60ae-ce98-4ce6-968a-f99922382ef3-O837, 66df60ae-ce98-4ce6-968a-f99922382ef3-O838] | |
INFO 2017-12-06 17:44:59,024 [pool-7-thread-1] com.mesosphere.sdk.scheduler.AbstractScheduler:processQueuedOffers(385): Waiting for queued offers... |