2019-03-06T15:43:23.175405363Z level=info msg=" --access-log=''" subsys=daemon
2019-03-06T15:43:23.175474163Z level=info msg=" --agent-labels=''" subsys=daemon
2019-03-06T15:43:23.17548224Z level=info msg=" --allow-localhost='auto'" subsys=daemon
2019-03-06T15:43:23.175487885Z level=info msg=" --auto-direct-node-routes='false'" subsys=daemon
2019-03-06T15:43:23.175494628Z level=info msg=" --auto-ipv6-node-routes='false'" subsys=daemon
2019-03-06T15:43:23.175499877Z level=info msg=" --bpf-compile-debug='false'" subsys=daemon
2019-03-06T15:43:23.175603938Z level=info msg=" --bpf-ct-global-any-max='262144'" subsys=daemon
2019-03-06T15:43:23.175622296Z level=info msg=" --bpf-ct-global-tcp-max='1000000'" subsys=daemon
2019-03-06T15:43:23.175694982Z level=info msg=" --bpf-root=''" subsys=daemon
2019-03-06T15:43:23.175717767Z level=info msg=" --cgroup-root=''" subsys=daemon
2019-03-06T15:43:23.175723621Z level=info msg=" --cluster-id='0'" subsys=daemon
2019-03-06T15:43:23.175728802Z level=info msg=" --cluster-name='default'" subsys=daemon
2019-03-06T15:43:23.175733933Z level=info msg=" --clustermesh-config='/var/lib/cilium/clustermesh/'" subsys=daemon
2019-03-06T15:43:23.175854858Z level=info msg=" --cmdref=''" subsys=daemon
2019-03-06T15:43:23.175868468Z level=info msg=" --config=''" subsys=daemon
2019-03-06T15:43:23.175953399Z level=info msg=" --conntrack-garbage-collector-interval='60'" subsys=daemon
2019-03-06T15:43:23.175965436Z level=info msg=" --container-runtime=''" subsys=daemon
2019-03-06T15:43:23.175970947Z level=info msg=" --container-runtime-endpoint='map[]'" subsys=daemon
2019-03-06T15:43:23.175976252Z level=info msg=" --datapath-mode='veth'" subsys=daemon
2019-03-06T15:43:23.175981863Z level=info msg=" --debug='false'" subsys=daemon
2019-03-06T15:43:23.175987027Z level=info msg=" --debug-verbose=''" subsys=daemon
2019-03-06T15:43:23.176096038Z level=info msg=" --device='undefined'" subsys=daemon
2019-03-06T15:43:23.176107146Z level=info msg=" --disable-conntrack='false'" subsys=daemon
2019-03-06T15:43:23.17617028Z level=info msg=" --disable-endpoint-crd='false'" subsys=daemon
2019-03-06T15:43:23.176182078Z level=info msg=" --disable-envoy-version-check='false'" subsys=daemon
2019-03-06T15:43:23.176187481Z level=info msg=" --disable-ipv4='false'" subsys=daemon
2019-03-06T15:43:23.176194411Z level=info msg=" --disable-k8s-services='false'" subsys=daemon
2019-03-06T15:43:23.176284846Z level=info msg=" --docker='unix:///var/run/docker.sock'" subsys=daemon
2019-03-06T15:43:23.176295618Z level=info msg=" --enable-ipsec='false'" subsys=daemon
2019-03-06T15:43:23.176300977Z level=info msg=" --enable-ipv4='true'" subsys=daemon
2019-03-06T15:43:23.176362355Z level=info msg=" --enable-ipv6='false'" subsys=daemon
2019-03-06T15:43:23.176373205Z level=info msg=" --enable-policy='default'" subsys=daemon
2019-03-06T15:43:23.176401739Z level=info msg=" --enable-tracing='false'" subsys=daemon
2019-03-06T15:43:23.176487261Z level=info msg=" --envoy-log=''" subsys=daemon
2019-03-06T15:43:23.176540458Z level=info msg=" --fixed-identity-mapping='map[]'" subsys=daemon
2019-03-06T15:43:23.176549698Z level=info msg=" --flannel-master-device=''" subsys=daemon
2019-03-06T15:43:23.176604581Z level=info msg=" --flannel-uninstall-on-exit='false'" subsys=daemon
2019-03-06T15:43:23.176616332Z level=info msg=" --http-403-msg=''" subsys=daemon
2019-03-06T15:43:23.17662156Z level=info msg=" --http-idle-timeout='0'" subsys=daemon
2019-03-06T15:43:23.176716906Z level=info msg=" --http-max-grpc-timeout='0'" subsys=daemon
2019-03-06T15:43:23.176727341Z level=info msg=" --http-request-timeout='3600'" subsys=daemon
2019-03-06T15:43:23.176732719Z level=info msg=" --http-retry-count='3'" subsys=daemon
2019-03-06T15:43:23.176738172Z level=info msg=" --http-retry-timeout='0'" subsys=daemon
2019-03-06T15:43:23.176842879Z level=info msg=" --install-iptables-rules='true'" subsys=daemon
2019-03-06T15:43:23.176855935Z level=info msg=" --ipv4-cluster-cidr-mask-size='8'" subsys=daemon
2019-03-06T15:43:23.176861644Z level=info msg=" --ipv4-node='auto'" subsys=daemon
2019-03-06T15:43:23.176866841Z level=info msg=" --ipv4-range='auto'" subsys=daemon
2019-03-06T15:43:23.176961767Z level=info msg=" --ipv4-service-range='auto'" subsys=daemon
2019-03-06T15:43:23.176982404Z level=info msg=" --ipv6-cluster-alloc-cidr='f00d::/64'" subsys=daemon
2019-03-06T15:43:23.177077057Z level=info msg=" --ipv6-node='auto'" subsys=daemon
2019-03-06T15:43:23.177088595Z level=info msg=" --ipv6-range='auto'" subsys=daemon
2019-03-06T15:43:23.177093517Z level=info msg=" --ipv6-service-range='auto'" subsys=daemon
2019-03-06T15:43:23.177176873Z level=info msg=" --ipvlan-master-device='undefined'" subsys=daemon
2019-03-06T15:43:23.177188072Z level=info msg=" --k8s-api-server=''" subsys=daemon
2019-03-06T15:43:23.177193488Z level=info msg=" --k8s-kubeconfig-path=''" subsys=daemon
2019-03-06T15:43:23.177198774Z level=info msg=" --k8s-legacy-host-allows-world='false'" subsys=daemon
2019-03-06T15:43:23.177204158Z level=info msg=" --k8s-namespace='kube-system'" subsys=daemon
2019-03-06T15:43:23.177210158Z level=info msg=" --k8s-require-ipv4-pod-cidr='false'" subsys=daemon
2019-03-06T15:43:23.17721534Z level=info msg=" --k8s-require-ipv6-pod-cidr='false'" subsys=daemon
2019-03-06T15:43:23.177314471Z level=info msg=" --keep-bpf-templates='false'" subsys=daemon
2019-03-06T15:43:23.177325255Z level=info msg=" --keep-config='false'" subsys=daemon
2019-03-06T15:43:23.177406594Z level=info msg=" --kvstore='etcd'" subsys=daemon
2019-03-06T15:43:23.177418454Z level=info msg=" --kvstore-opt='map[etcd.config:/var/lib/etcd-config/etcd.config]'" subsys=daemon
2019-03-06T15:43:23.177432879Z level=info msg=" --label-prefix-file=''" subsys=daemon
2019-03-06T15:43:23.177438671Z level=info msg=" --labels=''" subsys=daemon
2019-03-06T15:43:23.177456424Z level=info msg=" --lb=''" subsys=daemon
2019-03-06T15:43:23.177461576Z level=info msg=" --lib-dir='/var/lib/cilium'" subsys=daemon
2019-03-06T15:43:23.177937493Z level=info msg=" --log-driver=''" subsys=daemon
2019-03-06T15:43:23.177952213Z level=info msg=" --log-opt='map[]'" subsys=daemon
2019-03-06T15:43:23.177957839Z level=info msg=" --log-system-load='false'" subsys=daemon
2019-03-06T15:43:23.17796304Z level=info msg=" --masquerade='true'" subsys=daemon
2019-03-06T15:43:23.177968323Z level=info msg=" --max-controller-interval='0'" subsys=daemon
2019-03-06T15:43:23.177973528Z level=info msg=" --monitor-aggregation='none'" subsys=daemon
2019-03-06T15:43:23.177978648Z level=info msg=" --monitor-queue-size='32768'" subsys=daemon
2019-03-06T15:43:23.177983878Z level=info msg=" --mtu='0'" subsys=daemon
2019-03-06T15:43:23.177989028Z level=info msg=" --nat46-range='0:0:0:0:0:FFFF::/96'" subsys=daemon
2019-03-06T15:43:23.177994274Z level=info msg=" --pprof='false'" subsys=daemon
2019-03-06T15:43:23.177999317Z level=info msg=" --preallocate-bpf-maps='false'" subsys=daemon
2019-03-06T15:43:23.178004482Z level=info msg=" --prefilter-device='undefined'" subsys=daemon
2019-03-06T15:43:23.178009566Z level=info msg=" --prefilter-mode='native'" subsys=daemon
2019-03-06T15:43:23.178014748Z level=info msg=" --prepend-iptables-chains='true'" subsys=daemon
2019-03-06T15:43:23.178019938Z level=info msg=" --prometheus-serve-addr=''" subsys=daemon
2019-03-06T15:43:23.178025088Z level=info msg=" --proxy-connect-timeout='1'" subsys=daemon
2019-03-06T15:43:23.17803009Z level=info msg=" --restore='true'" subsys=daemon
2019-03-06T15:43:23.178035179Z level=info msg=" --sidecar-http-proxy='false'" subsys=daemon
2019-03-06T15:43:23.178040269Z level=info msg=" --sidecar-istio-proxy-image='cilium/istio_proxy'" subsys=daemon
2019-03-06T15:43:23.178045571Z level=info msg=" --single-cluster-route='false'" subsys=daemon
2019-03-06T15:43:23.178050835Z level=info msg=" --socket-path='/var/run/cilium/cilium.sock'" subsys=daemon
2019-03-06T15:43:23.178056389Z level=info msg=" --sockops-enable='false'" subsys=daemon
2019-03-06T15:43:23.178061536Z level=info msg=" --state-dir='/var/run/cilium'" subsys=daemon
2019-03-06T15:43:23.178066629Z level=info msg=" --tofqdns-dns-reject-response-code='refused'" subsys=daemon
2019-03-06T15:43:23.178071949Z level=info msg=" --tofqdns-enable-poller='false'" subsys=daemon
2019-03-06T15:43:23.178077219Z level=info msg=" --tofqdns-enable-poller-events='true'" subsys=daemon
2019-03-06T15:43:23.178082326Z level=info msg=" --tofqdns-min-ttl='0'" subsys=daemon
2019-03-06T15:43:23.178098349Z level=info msg=" --tofqdns-proxy-port='0'" subsys=daemon
2019-03-06T15:43:23.17810392Z level=info msg=" --trace-payloadlen='128'" subsys=daemon
2019-03-06T15:43:23.178109093Z level=info msg=" --tunnel='vxlan'" subsys=daemon
2019-03-06T15:43:23.178114195Z level=info msg=" --version='false'" subsys=daemon
2019-03-06T15:43:23.178119254Z level=info msg=" _ _ _" subsys=daemon
2019-03-06T15:43:23.178124323Z level=info msg=" ___|_| |_|_ _ _____" subsys=daemon
2019-03-06T15:43:23.178129414Z level=info msg="| _| | | | | | |" subsys=daemon
2019-03-06T15:43:23.17813536Z level=info msg="|___|_|_|_|___|_|_|_|" subsys=daemon
2019-03-06T15:43:23.178150469Z level=info msg="Cilium 1.4.0-rc5 e5f5c6f 2019-01-25T17:29:27-08:00 go version go1.11.1 linux/amd64" subsys=daemon
2019-03-06T15:43:23.204986546Z level=info msg="cilium-envoy version: 84ee839e1d78ef858a39e390288ad417d35bb1d4/1.9.0-dev/Modified/RELEASE" subsys=daemon
2019-03-06T15:43:23.275847747Z level=info msg="clang (3.8.1) and kernel (4.19.0) versions: OK!" subsys=daemon
2019-03-06T15:43:23.280112838Z level=info msg="linking environment: OK!" subsys=daemon
2019-03-06T15:43:24.440041798Z level=info msg="bpf_requirements check: OK!" subsys=daemon
2019-03-06T15:43:24.44109134Z level=warning msg="BPF filesystem is going to be mounted automatically in /run/cilium/bpffs. However, it probably means that Cilium is running inside container and BPFFS is not mounted on the host. for more information, see: https://cilium.link/err-bpf-mount" subsys=bpf
2019-03-06T15:43:24.441936912Z level=info msg="Mounted BPF filesystem /run/cilium/bpffs" subsys=bpf
2019-03-06T15:43:24.443548097Z level=info msg="Connecting to etcd server..." config=/var/lib/etcd-config/etcd.config endpoints="[https://03cc9904-4704-44b7-970a-657501fe2f87.k8s.ondigitalocean.com:2379]" subsys=kvstore
2019-03-06T15:43:24.446294837Z level=info msg="Valid label prefix configuration:" subsys=labels-filter
2019-03-06T15:43:24.448019094Z level=info msg=" - :io.kubernetes.pod.namespace" subsys=labels-filter
2019-03-06T15:43:24.448144168Z level=info msg=" - :io.cilium.k8s.namespace.labels" subsys=labels-filter
2019-03-06T15:43:24.44824113Z level=info msg=" - !:io.kubernetes" subsys=labels-filter
2019-03-06T15:43:24.448346879Z level=info msg=" - !:.*kubernetes.io" subsys=labels-filter
2019-03-06T15:43:24.448433214Z level=info msg=" - !:pod-template-generation" subsys=labels-filter
2019-03-06T15:43:24.448527721Z level=info msg=" - !:pod-template-hash" subsys=labels-filter
2019-03-06T15:43:24.448600627Z level=info msg=" - !:controller-revision-hash" subsys=labels-filter
2019-03-06T15:43:24.448701384Z level=info msg=" - !:annotation.cilium.io/" subsys=labels-filter
2019-03-06T15:43:24.448778561Z level=info msg=" - !:annotation.cilium-identity" subsys=labels-filter
2019-03-06T15:43:24.448895869Z level=info msg=" - !:annotation.sidecar.istio.io" subsys=labels-filter
2019-03-06T15:43:24.448923318Z level=info msg=" - !:annotation.etcd.version" subsys=labels-filter
2019-03-06T15:43:24.448979546Z level=info msg=" - !:etcd_node" subsys=labels-filter
2019-03-06T15:43:24.449152464Z level=info msg="Container runtime options set: endpoint=/var/run/containerd/containerd.sock,endpoint=/var/run/crio.sock,datapath-mode=veth,endpoint=unix:///var/run/docker.sock" subsys=daemon
2019-03-06T15:43:24.449165251Z level=info msg="Initializing daemon" subsys=daemon
2019-03-06T15:43:24.449409199Z level=info msg="Detected MTU 1500" subsys=mtu
2019-03-06T15:43:24.457436969Z level=error msg="Attempt to upsert invalid IP into ipcache layer" identity="{host agent-local}" ipAddr="<nil>" subsys=ipcache
2019-03-06T15:43:24.457487085Z level=error msg="Attempt to upsert invalid IP into ipcache layer" identity="{host agent-local}" ipAddr="<nil>" subsys=ipcache
2019-03-06T15:43:24.458064567Z level=error msg="Attempt to upsert invalid IP into ipcache layer" identity="{host agent-local}" ipAddr="<nil>" subsys=ipcache
2019-03-06T15:43:24.458206122Z level=error msg="Attempt to upsert invalid IP into ipcache layer" identity="{host agent-local}" ipAddr="<nil>" subsys=ipcache
2019-03-06T15:43:24.458883344Z level=info msg="Clearing leftover Cilium veths" subsys=daemon
2019-03-06T15:43:24.461507076Z level=info msg="Waiting for k8s api-server to be ready..." subsys=k8s
2019-03-06T15:43:24.474998239Z level=info msg="Connected to k8s api-server" ipAddr="https://10.245.0.1:443" subsys=k8s
2019-03-06T15:43:24.481592699Z level=info msg="Retrieved node information from kubernetes" nodeName=sleepy-mccarthy-uomv subsys=k8s
2019-03-06T15:43:24.481630085Z level=info msg="Received own node information from API server" ipAddr.ipv4=10.135.147.17 ipAddr.ipv6="<nil>" nodeName=sleepy-mccarthy-uomv subsys=k8s
2019-03-06T15:43:24.481634966Z level=info msg="Retrieved IPv4 allocation range for node. Using it for ipv4-range" node=sleepy-mccarthy-uomv subsys=node v4Prefix=10.244.0.0/24
2019-03-06T15:43:24.481638733Z level=info msg="Retrieved IPv6 allocation range for node. Using it for ipv6-range" node=sleepy-mccarthy-uomv subsys=node v6Prefix="f00d::af4:0:0:0/96"
2019-03-06T15:43:24.481643517Z level=info msg="Automatically retrieved IP for node. Using it for ipv4-node" ipAddr=10.135.147.17 node=sleepy-mccarthy-uomv subsys=node
2019-03-06T15:43:24.481647189Z level=info msg="Kubernetes information:" subsys=daemon
2019-03-06T15:43:24.481650283Z level=info msg=" Namespace: kube-system" subsys=daemon
2019-03-06T15:43:24.481653436Z level=info msg="k8s mode: Allowing localhost to reach local endpoints" subsys=daemon
2019-03-06T15:43:24.481656676Z level=info msg="Initializing node addressing" subsys=daemon
2019-03-06T15:43:24.481792996Z level=info msg="Restored IPv4 internal node IP: 10.244.0.1" subsys=node
2019-03-06T15:43:24.482505194Z level=info msg="Initializing IPAM" subsys=daemon
2019-03-06T15:43:24.520210992Z level=info msg="Restoring endpoints from former life..." subsys=daemon
2019-03-06T15:43:24.520250571Z level=info msg="Endpoints restored" count.restored=0 count.total=1 subsys=daemon
2019-03-06T15:43:24.520255098Z level=info msg="Addressing information:" subsys=daemon
2019-03-06T15:43:24.52025842Z level=info msg=" Cluster-Name: default" subsys=daemon
2019-03-06T15:43:24.520261692Z level=info msg=" Cluster-ID: 0" subsys=daemon
2019-03-06T15:43:24.520264902Z level=info msg=" Local node-name: sleepy-mccarthy-uomv" subsys=daemon
2019-03-06T15:43:24.520268031Z level=info msg=" External-Node IPv4: 10.135.147.17" subsys=daemon
2019-03-06T15:43:24.520271121Z level=info msg=" Internal-Node IPv4: 10.244.0.1" subsys=daemon
2019-03-06T15:43:24.520274171Z level=info msg=" Cluster IPv4 prefix: 10.0.0.0/8" subsys=daemon
2019-03-06T15:43:24.520277341Z level=info msg=" IPv4 allocation prefix: 10.244.0.0/24" subsys=daemon
2019-03-06T15:43:24.520280414Z level=info msg=" Loopback IPv4: 10.244.0.46" subsys=daemon
2019-03-06T15:43:24.520283492Z level=info msg="Annotating k8s node with CIDR ranges" subsys=daemon
2019-03-06T15:43:24.520286614Z level=info msg="Initializing identity allocator" subsys=identity-cache
2019-03-06T15:43:24.52028975Z level=info msg="Adding local node to cluster" subsys=daemon
2019-03-06T15:43:24.533712109Z level=info msg="Starting to watch allocation changes" kvstoreErr="<nil>" kvstoreStatus="No connection to etcd" prefix=cilium/state/identities/v1/id subsys=allocator
2019-03-06T15:43:24.5493722Z level=info msg="Cluster-ID is not specified, skipping ClusterMesh initialization" subsys=daemon
2019-03-06T15:43:24.553683513Z level=info msg="Sockmap disabled." subsys=sockops
2019-03-06T15:43:24.553710645Z level=info msg="Sockmsg Disabled." subsys=sockops
2019-03-06T15:43:26.051978273Z level=info msg="Setting sysctl net.core.bpf_jit_enable=1" subsys=daemon
2019-03-06T15:43:26.052030011Z level=info msg="Setting sysctl net.ipv4.conf.all.rp_filter=0" subsys=daemon
2019-03-06T15:43:26.052038038Z level=info msg="Setting sysctl net.ipv6.conf.all.disable_ipv6=0" subsys=daemon
2019-03-06T15:43:26.052116041Z level=info msg="Starting IP identity watcher" subsys=ipcache
2019-03-06T15:43:26.052470399Z level=info msg="Envoy: Starting xDS gRPC server listening on /var/run/cilium/xds.sock" subsys=envoy-manager
2019-03-06T15:43:26.052721984Z level=info msg="Validating configured node address ranges" subsys=daemon
2019-03-06T15:43:26.052853487Z level=info msg="Starting connection tracking garbage collector" subsys=daemon
2019-03-06T15:43:26.053265207Z level=info msg="Skipping CT garbage collection" error="Unable to get object /run/cilium/bpffs/tc/globals/cilium_ct4_global: no such file or directory" file-path=/run/cilium/bpffs/tc/globals/cilium_ct4_global subsys=endpoint-manager
2019-03-06T15:43:26.053277349Z level=info msg="Skipping CT garbage collection" error="Unable to get object /run/cilium/bpffs/tc/globals/cilium_ct_any4_global: no such file or directory" file-path=/run/cilium/bpffs/tc/globals/cilium_ct_any4_global subsys=endpoint-manager
2019-03-06T15:43:26.053300676Z level=info msg="Initial scan of connection tracking completed" subsys=endpoint-manager
2019-03-06T15:43:26.053307586Z level=info msg="Launching node monitor daemon" subsys=daemon
2019-03-06T15:43:26.053313784Z level=info msg="Enabling k8s event listener" subsys=daemon
2019-03-06T15:43:26.194428054Z level=info msg="Serving cilium node monitor v1.0 API at unix:///var/run/cilium/monitor.sock" subsys=cilium-node-monitor
2019-03-06T15:43:26.194665224Z level=info msg="Serving cilium node monitor v1.2 API at unix:///var/run/cilium/monitor1_2.sock" subsys=cilium-node-monitor
2019-03-06T15:43:26.194920442Z level=info msg="Beginning to read cilium agent events" subsys=cilium-node-monitor
2019-03-06T15:43:26.643593496Z level=info msg="CRD (CustomResourceDefinition) is installed and up-to-date" name=CiliumNetworkPolicy/v2 subsys=k8s
2019-03-06T15:43:26.646279794Z level=info msg="Updating CRD (CustomResourceDefinition)..." name=v2.CiliumEndpoint subsys=k8s
2019-03-06T15:43:27.661262432Z level=info msg="CRD (CustomResourceDefinition) is installed and up-to-date" name=v2.CiliumEndpoint subsys=k8s
2019-03-06T15:43:27.662263599Z level=info msg="Waiting until all pre-existing resources related to policy have been received" subsys=daemon
2019-03-06T15:43:27.709367401Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=ingress-nginx k8sSvcName=ingress-nginx service="frontend:10.245.240.51/ports=[http https]/selector=map[app.kubernetes.io/name:ingress-nginx app.kubernetes.io/part-of:ingress-nginx]" subsys=daemon
2019-03-06T15:43:27.709532882Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=kube-system k8sSvcName=kube-dns service="frontend:10.245.0.10/ports=[dns dns-tcp metrics]/selector=map[k8s-app:kube-dns]" subsys=daemon
2019-03-06T15:43:27.709711074Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=elasticsearch k8sSvcName=elasticsearch service="frontend:10.245.169.182/ports=[es transport]/selector=map[app:es-cluster workloadID_elasticsearch:true]" subsys=daemon
2019-03-06T15:43:27.709868967Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints="104.248.248.41:443/TCP" k8sNamespace=default k8sSvcName=kubernetes service="frontend:10.245.0.1/ports=[https]/selector=map[]" subsys=daemon
2019-03-06T15:43:27.710043279Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=redis k8sSvcName=redis service="frontend:10.245.27.162/ports=[redis]/selector=map[app:redis]" subsys=daemon
2019-03-06T15:43:27.710150049Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=wir-e-ng k8sSvcName=wir-e-ng service="frontend:10.245.114.159/ports=[default]/selector=map[workloadID_wir-e-ng:true]" subsys=daemon
2019-03-06T15:43:27.71026088Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=kube-system k8sSvcName=tiller-deploy service="frontend:10.245.184.217/ports=[tiller]/selector=map[app:helm name:tiller]" subsys=daemon
2019-03-06T15:43:27.71032727Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=memcached k8sSvcName=memcached service="frontend:10.245.32.241/ports=[memcached]/selector=map[app:memcached]" subsys=daemon
2019-03-06T15:43:27.71044224Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints="10.244.3.172:6443/TCP" k8sNamespace=cert-manager k8sSvcName=cert-manager-webhook service="frontend:10.245.18.115/ports=[https]/selector=map[app:webhook release:cert-manager]" subsys=daemon
2019-03-06T15:43:27.710565511Z level=info msg="Kubernetes service definition changed" action=service-updated endpoints= k8sNamespace=default k8sSvcName=stellenboerse service="frontend:<nil>/ports=[default]/selector=map[workloadID_stellenboerse:true]" subsys=daemon
2019-03-06T15:43:27.76369742Z level=info msg="All pre-existing resources related to policy have been received; continuing" subsys=daemon
2019-03-06T15:43:27.76373641Z level=info msg="Regenerating 0 restored endpoints" subsys=daemon
2019-03-06T15:43:27.763902923Z level=info msg="Enabling docker event listener" subsys=workload-watcher
2019-03-06T15:43:27.764435445Z level=info msg="Finished regenerating restored endpoints" regenerated=0 subsys=daemon total=0
2019-03-06T15:43:27.764723578Z level=warning msg="Unable to release endpoint ID" error="Unable to release endpoint ID 3239" subsys=endpoint-manager
2019-03-06T15:43:27.765255616Z level=info msg="Removed endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=3239 identity=4 ipv4=10.244.0.56 ipv6= k8sPodName=/ subsys=endpoint
2019-03-06T15:43:27.769532757Z level=info msg="Building health endpoint" subsys=daemon
2019-03-06T15:43:27.769574999Z level=info msg="Launching Cilium health daemon" subsys=daemon
2019-03-06T15:43:27.76958177Z level=info msg="Launching Cilium health endpoint" subsys=daemon
2019-03-06T15:43:27.790403177Z level=warning msg="health endpoint is unreachable, restarting health endpoint" error="cilium-health endpoint hasn't yet been initialized" subsys=daemon
2019-03-06T15:43:27.803805791Z level=info msg="Spawning health endpoint with arguments []string{\"cilium-health\", \"cilium_health\", \"cilium\", \"\", \"10.244.0.151/32\", \"cilium-health\", \"-d --admin=unix --passive --pidfile /var/run/cilium/state/health-endpoint.pid\"}" subsys=cilium-health-launcher
2019-03-06T15:43:27.8286553Z level=info msg="Identity of endpoint changed" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=0 identity=4 identityLabels="reserved:health" ipv4= ipv6= k8sPodName=/ oldIdentity="no identity" subsys=endpoint
2019-03-06T15:43:27.921561408Z level=info msg="Initializing Cilium API" subsys=daemon
2019-03-06T15:43:27.95365254Z level=info msg="Daemon initialization completed" bootstrapTime=4.780620629s subsys=daemon
2019-03-06T15:43:27.953949113Z level=info msg="Serving cilium at unix:///var/run/cilium/cilium.sock" subsys=daemon
2019-03-06T15:43:28.136008841Z level=info msg="Serving cilium health at http://[::]:4240" subsys=health-server
2019-03-06T15:43:28.829151503Z level=info msg="Adding route" command="ip route add 10.244.0.1/32 dev cilium" netns=cilium-health subsys=cilium-health-launcher
2019-03-06T15:43:28.829390881Z level=info msg="Adding route" command="ip route add 0.0.0.0/0 via 10.244.0.1 mtu 1450 dev cilium" netns=cilium-health subsys=cilium-health-launcher
2019-03-06T15:43:28.869838746Z level=info msg="New endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=337 identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint
2019-03-06T15:43:28.87012Z level=info msg="Regenerating endpoint" containerID= datapathPolicyRevision=0 desiredPolicyRevision=0 endpointID=337 identity=4 ipv4= ipv6= k8sPodName=/ reason="health daemon bootstrap" startTime="2019-03-06 15:43:28.869952186 +0000 UTC m=+5.889821003" subsys=endpoint
2019-03-06T15:43:28.926154684Z level=info msg="Serving cilium health at http://[::]:4240" subsys=health-server
2019-03-06T15:43:29.472986575Z level=info msg="No request received to manage networking for container" containerID=2e767bc17f maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:29.666306608Z level=info msg="Recompiled endpoint BPF program" BPFCompilationTime=794.73931ms containerID= datapathPolicyRevision=0 desiredPolicyRevision=1 endpointID=337 error="<nil>" identity=4 ipv4= ipv6= k8sPodName=/ subsys=endpoint
2019-03-06T15:43:29.666661811Z level=info msg="Completed endpoint regeneration" bpfCompilation=794.73931ms buildDuration=796.425457ms containerID= datapathPolicyRevision=1 desiredPolicyRevision=1 endpointID=337 identity=4 ipv4= ipv6= k8sPodName=/ mapSync="210.093µs" policyCalculation="65.975µs" prepareBuild="637.269µs" proxyConfiguration="9.579µs" proxyPolicyCalculation="23.955µs" proxyWaitForAck="1.912µs" reason="health daemon bootstrap" subsys=endpoint waitingForCTClean="4.384µs" waitingForLock="3.001µs"
2019-03-06T15:43:30.070903612Z level=info msg="No request received to manage networking for container" containerID=2c62eae047 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:30.264746904Z level=info msg="No request received to manage networking for container" containerID=7d87f4f76d maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:30.467264688Z level=info msg="No request received to manage networking for container" containerID=1a7dc3f238 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:30.665370517Z level=info msg="No request received to manage networking for container" containerID=159af81d2c maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:30.865310294Z level=info msg="No request received to manage networking for container" containerID=30f2a30255 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:31.078404956Z level=info msg="No request received to manage networking for container" containerID=94f3fbe6e8 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:31.278615008Z level=info msg="No request received to manage networking for container" containerID=c937f9ad70 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:31.477154814Z level=info msg="No request received to manage networking for container" containerID=9ee82ea752 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:31.665181767Z level=info msg="No request received to manage networking for container" containerID=789dfd7ae7 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:43:31.867783802Z level=info msg="No request received to manage networking for container" containerID=2797e17441 maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:44:26.055496349Z level=info msg="Conntrack garbage collection statistics" completed=true duration=2.035276ms family=ipv4 maxEntries=1000000 numDeleted=1 numKeyFallbacks=0 numLookups=2 numLookupsFailed=0 protocol=TCP startTime="2019-03-06 15:44:26.053217443 +0000 UTC m=+63.073086337" subsys=map-ct
2019-03-06T15:44:26.056175206Z level=info msg="Conntrack garbage collection statistics" completed=true duration="521.673µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-03-06 15:44:26.055539698 +0000 UTC m=+63.075408554" subsys=map-ct
2019-03-06T15:45:26.060065003Z level=info msg="Conntrack garbage collection statistics" completed=true duration=2.599541ms family=ipv4 maxEntries=1000000 numDeleted=2 numKeyFallbacks=0 numLookups=3 numLookupsFailed=0 protocol=TCP startTime="2019-03-06 15:45:26.056452207 +0000 UTC m=+123.076321115" subsys=map-ct
2019-03-06T15:45:26.060113397Z level=info msg="Conntrack garbage collection statistics" completed=true duration="617.798µs" family=ipv4 maxEntries=262144 numDeleted=0 numKeyFallbacks=0 numLookups=1 numLookupsFailed=0 protocol=non-TCP startTime="2019-03-06 15:45:26.059201086 +0000 UTC m=+123.079069964" subsys=map-ct
2019-03-06T15:45:57.80869105Z level=info msg="No request received to manage networking for container" containerID=5288df25fe maxRetry=20 subsys=workload-watcher willRetry=false
2019-03-06T15:45:59.937020508Z level=info msg="Serving cilium health at unix:///var/run/cilium/health.sock" subsys=health-server
2019-03-06T15:46:24.518346199Z level=fatal msg="Unable to initialize local node" error="Time out while retrieving initial list of objects from kvstore" subsys=daemon