Logs and pod status for the OpenShift aggregated-logging components (Curator, Elasticsearch, Fluentd, Kibana)
[root@viaq openshift-ansible]# oc get pods
NAME                           READY     STATUS    RESTARTS   AGE
logging-curator-1-hklth        1/1       Running   22         4h
logging-es-7vo926zw-1-deploy   0/1       Error     0          4h
logging-es-7vo926zw-2-phjrq    1/1       Running   0          50m
logging-fluentd-024kj          1/1       Running   1          4h
logging-kibana-1-deploy        0/1       Error     0          4h
logging-kibana-2-jmt73         2/2       Running   0          20m
[root@viaq openshift-ansible]#
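Note: both first-attempt deployer pods (logging-es-7vo926zw-1-deploy and logging-kibana-1-deploy) ended in Error, but their second deployments are Running, so the initial rollouts failed and were retried successfully. To find out why the first attempts failed, the deployer pod logs and events can be checked (a sketch, using the pod names from the output above):

# Deployer log for the failed first Elasticsearch rollout
oc logs logging-es-7vo926zw-1-deploy
# Events for the failed first Kibana rollout
oc describe pod logging-kibana-1-deploy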
[root@viaq openshift-ansible]# oc describe pod logging-curator-1-hklth
Name: logging-curator-1-hklth
Namespace: logging
Security Policy: restricted
Node: viaq.logging.test/172.16.93.5
Start Time: Thu, 22 Jun 2017 13:08:55 +0000
Labels: component=curator
        deployment=logging-curator-1
        deploymentconfig=logging-curator
        logging-infra=curator
        provider=openshift
Status: Running
IP: 10.128.0.16
Controllers: ReplicationController/logging-curator-1
Containers:
  curator:
    Container ID: docker://66b800dbd32ec731d8acd639ed94d56fad83509ddff76f6ba9eeb1ac96e60b4e
    Image: docker.io/openshift/origin-logging-curator:v1.5.1
    Image ID: docker-pullable://docker.io/openshift/origin-logging-curator@sha256:72f1e279da63531941978d98f1cea7cbce6be4a935ce986a229f436ffa03697d
    Port:
    Limits:
      cpu: 100m
    Requests:
      cpu: 100m
    State: Running
      Started: Thu, 22 Jun 2017 16:57:02 +0000
    Last State: Terminated
      Reason: Error
      Exit Code: 255
      Started: Thu, 22 Jun 2017 16:50:54 +0000
      Finished: Thu, 22 Jun 2017 16:53:57 +0000
    Ready: True
    Restart Count: 22
    Volume Mounts:
      /etc/curator/keys from certs (ro)
      /etc/curator/settings from config (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from aggregated-logging-curator-token-5g95b (ro)
    Environment Variables:
      K8S_HOST_URL: https://kubernetes.default.svc.cluster.local
      ES_HOST: logging-es
      ES_PORT: 9200
      ES_CLIENT_CERT: /etc/curator/keys/cert
      ES_CLIENT_KEY: /etc/curator/keys/key
      ES_CA: /etc/curator/keys/ca
      CURATOR_DEFAULT_DAYS: 30
      CURATOR_RUN_HOUR: 0
      CURATOR_RUN_MINUTE: 0
      CURATOR_RUN_TIMEZONE: UTC
      CURATOR_SCRIPT_LOG_LEVEL: INFO
      CURATOR_LOG_LEVEL: ERROR
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
Volumes:
  certs:
    Type: Secret (a volume populated by a Secret)
    SecretName: logging-curator
  config:
    Type: ConfigMap (a volume populated by a ConfigMap)
    Name: logging-curator
  aggregated-logging-curator-token-5g95b:
    Type: Secret (a volume populated by a Secret)
    SecretName: aggregated-logging-curator-token-5g95b
QoS Class: Burstable
Tolerations: <none>
Events:
  FirstSeen LastSeen Count From SubObjectPath Type Reason Message
  --------- -------- ----- ---- ------------- ---- ------ -------
  4h 1h 403 {kubelet viaq.logging.test} spec.containers{curator} Warning BackOff Back-off restarting failed docker container
  4h 1h 390 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "curator" with CrashLoopBackOff: "Back-off 5m0s restarting failed container=curator pod=logging-curator-1-hklth_logging(f236dae1-574b-11e7-8b3f-000c293d9bc2)"
  4h 1h 24 {kubelet viaq.logging.test} spec.containers{curator} Normal Pulling pulling image "docker.io/openshift/origin-logging-curator:v1.5.1"
  3h 1h 11 {kubelet viaq.logging.test} spec.containers{curator} Normal Created (events with common reason combined)
  4h 1h 20 {kubelet viaq.logging.test} spec.containers{curator} Normal Pulled Successfully pulled image "docker.io/openshift/origin-logging-curator:v1.5.1"
  3h 1h 11 {kubelet viaq.logging.test} spec.containers{curator} Normal Started (events with common reason combined)
  59m 58m 4 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: network is not ready: [SDN pod network is not ready]
  58m 58m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal Created Created container with docker id 95e1142beaab; Security:[seccomp=unconfined]
  58m 58m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal Started Started container with docker id 95e1142beaab
  54m 54m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal Created Created container with docker id 44bd7c7c735c; Security:[seccomp=unconfined]
  54m 54m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal Started Started container with docker id 44bd7c7c735c
  51m 51m 1 {kubelet viaq.logging.test} spec.containers{curator} Warning BackOff Back-off restarting failed docker container
  51m 51m 1 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "curator" with CrashLoopBackOff: "Back-off 10s restarting failed container=curator pod=logging-curator-1-hklth_logging(f236dae1-574b-11e7-8b3f-000c293d9bc2)"
  50m 50m 1 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "curator" with ErrImagePull: "image pull failed for docker.io/openshift/origin-logging-curator:v1.5.1, this may be because there are no credentials on this request. details: (net/http: request canceled)"
  50m 50m 1 {kubelet viaq.logging.test} spec.containers{curator} Warning Failed Failed to pull image "docker.io/openshift/origin-logging-curator:v1.5.1": image pull failed for docker.io/openshift/origin-logging-curator:v1.5.1, this may be because there are no credentials on this request. details: (net/http: request canceled)
  49m 49m 1 {kubelet viaq.logging.test} spec.containers{curator} Warning Failed Failed to pull image "docker.io/openshift/origin-logging-curator:v1.5.1": image pull failed for docker.io/openshift/origin-logging-curator:v1.5.1, this may be because there are no credentials on this request. details: (Tag v1.5.1 not found in repository docker.io/openshift/origin-logging-curator)
  49m 49m 1 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "curator" with ErrImagePull: "image pull failed for docker.io/openshift/origin-logging-curator:v1.5.1, this may be because there are no credentials on this request. details: (Tag v1.5.1 not found in repository docker.io/openshift/origin-logging-curator)"
  49m 49m 1 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: failed to "StartContainer" for "curator" with ImagePullBackOff: "Back-off pulling image \"docker.io/openshift/origin-logging-curator:v1.5.1\""
  49m 49m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal BackOff Back-off pulling image "docker.io/openshift/origin-logging-curator:v1.5.1"
  58m 49m 5 {kubelet viaq.logging.test} spec.containers{curator} Normal Pulling pulling image "docker.io/openshift/origin-logging-curator:v1.5.1"
  58m 48m 3 {kubelet viaq.logging.test} spec.containers{curator} Normal Pulled Successfully pulled image "docker.io/openshift/origin-logging-curator:v1.5.1"
  48m 48m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal Created Created container with docker id 66b800dbd32e; Security:[seccomp=unconfined]
  48m 48m 1 {kubelet viaq.logging.test} spec.containers{curator} Normal Started Started container with docker id 66b800dbd32e
[root@viaq openshift-ansible]#
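Note: the curator pod has restarted 22 times; the events show a CrashLoopBackOff (last exit code 255) plus a few transient image-pull failures. To see why the container keeps dying, the log of the previous (terminated) instance is usually more informative than the current one, and raising the log level can help; a sketch:

# Log of the last terminated curator container
oc logs logging-curator-1-hklth --previous
# Optionally raise verbosity (triggers a redeploy); CURATOR_LOG_LEVEL is ERROR above
oc set env dc/logging-curator CURATOR_LOG_LEVEL=DEBUG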
[root@viaq openshift-ansible]# oc logs -f logging-es-7vo926zw-2-phjrq
Comparing the specificed RAM to the maximum recommended for ElasticSearch...
Inspecting the maximum RAM available...
ES_JAVA_OPTS: '-Dmapper.allow_dots_in_name=true -Xms128M -Xmx4096m'
Checking if Elasticsearch is ready on https://localhost:9200 ..............Will connect to localhost:9300 ... done
Contacting elasticsearch cluster 'elasticsearch' and wait for YELLOW clusterstate ...
Clustername: logging-es
Clusterstate: GREEN
Number of nodes: 1
Number of data nodes: 1
.searchguard.logging-es-7vo926zw-2-phjrq index does not exists, attempt to create it ... done (with 0 replicas, auto expand replicas is off)
Populate config from /opt/app-root/src/sgconfig/
Will update 'config' with /opt/app-root/src/sgconfig/sg_config.yml
SUCC: Configuration for 'config' created or updated
Will update 'roles' with /opt/app-root/src/sgconfig/sg_roles.yml
SUCC: Configuration for 'roles' created or updated
Will update 'rolesmapping' with /opt/app-root/src/sgconfig/sg_roles_mapping.yml
SUCC: Configuration for 'rolesmapping' created or updated
Will update 'internalusers' with /opt/app-root/src/sgconfig/sg_internal_users.yml
SUCC: Configuration for 'internalusers' created or updated
Will update 'actiongroups' with /opt/app-root/src/sgconfig/sg_action_groups.yml
SUCC: Configuration for 'actiongroups' created or updated
Done with success
Seeded the searchguard ACL index
Create index template 'com.redhat.viaq-openshift-operations.template.json'
* About to connect() to localhost port 9200 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 9200 (#0)
* Initializing NSS with certpath: sql:/etc/pki/nssdb
*   CAfile: /etc/elasticsearch/secret/admin-ca
    CApath: none
* NSS: client certificate from file
*   subject: CN=system.admin,OU=OpenShift,O=Logging
*   start date: Jun 22 13:03:21 2017 GMT
*   expire date: Jun 22 13:03:21 2019 GMT
*   common name: system.admin
*   issuer: CN=logging-signer-test
* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
* Server certificate:
*   subject: CN=logging-es,OU=OpenShift,O=Logging
*   start date: Jun 22 13:03:31 2017 GMT
*   expire date: Jun 22 13:03:31 2019 GMT
*   common name: logging-es
*   issuer: CN=logging-signer-test
> PUT /_template/com.redhat.viaq-openshift-operations.template.json HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost:9200
> Accept: */*
> Content-Length: 16125
> Content-Type: application/x-www-form-urlencoded
> Expect: 100-continue
>
< HTTP/1.1 100 Continue
} [data not shown]
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=UTF-8
< Content-Length: 21
<
{ [data not shown]
* Connection #0 to host localhost left intact
{"acknowledged":true}Create index template 'com.redhat.viaq-openshift-project.template.json'
* About to connect() to localhost port 9200 (#0)
*   Trying ::1...
* Connected to localhost (::1) port 9200 (#0)
* Initializing NSS with certpath: sql:/etc/pki/nssdb
*   CAfile: /etc/elasticsearch/secret/admin-ca
    CApath: none
* NSS: client certificate from file
*   subject: CN=system.admin,OU=OpenShift,O=Logging
*   start date: Jun 22 13:03:21 2017 GMT
*   expire date: Jun 22 13:03:21 2019 GMT
*   common name: system.admin
*   issuer: CN=logging-signer-test
* SSL connection using TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384
* Server certificate:
*   subject: CN=logging-es,OU=OpenShift,O=Logging
*   start date: Jun 22 13:03:31 2017 GMT
*   expire date: Jun 22 13:03:31 2019 GMT
*   common name: logging-es
*   issuer: CN=logging-signer-test
> PUT /_template/com.redhat.viaq-openshift-project.template.json HTTP/1.1
> User-Agent: curl/7.29.0
> Host: localhost:9200
> Accept: */*
> Content-Length: 16121
> Content-Type: application/x-www-form-urlencoded
> Expect: 100-continue
>
< HTTP/1.1 100 Continue
} [data not shown]
< HTTP/1.1 200 OK
< Content-Type: application/json; charset=UTF-8
< Content-Length: 21
<
{ [data not shown]
* Connection #0 to host localhost left intact
^C
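Note: the startup log shows the cluster reached GREEN and both index templates returned {"acknowledged":true}. To re-check cluster health later without tailing the startup log, the API can be queried from inside the pod with the admin client certificate (a sketch; only the admin-ca path appears in the output above, the admin-cert and admin-key file names are assumptions):

oc exec logging-es-7vo926zw-2-phjrq -- curl -s \
  --cacert /etc/elasticsearch/secret/admin-ca \
  --cert /etc/elasticsearch/secret/admin-cert \
  --key /etc/elasticsearch/secret/admin-key \
  "https://localhost:9200/_cluster/health?pretty"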
[root@viaq openshift-ansible]# oc describe pod logging-es-7vo926zw-2-phjrq|more
Name: logging-es-7vo926zw-2-phjrq
Namespace: logging
Security Policy: restricted
Node: viaq.logging.test/172.16.93.5
Start Time: Thu, 22 Jun 2017 16:52:35 +0000
Labels: component=es
        deployment=logging-es-7vo926zw-2
        deploymentconfig=logging-es-7vo926zw
        logging-infra=elasticsearch
        provider=openshift
Status: Running
IP: 10.128.0.20
Controllers: ReplicationController/logging-es-7vo926zw-2
Containers:
  elasticsearch:
    Container ID: docker://4b43ba3693fd180f4356030d321d3266cfd0df529750fa2f1f54c88330e46724
    Image: docker.io/openshift/origin-logging-elasticsearch:v1.5.1
    Image ID: docker-pullable://docker.io/openshift/origin-logging-elasticsearch@sha256:12256ce9478e1483c1eb1c4a7fcae8e9e4d4b578f65ba918949863b91d9d7c05
    Ports: 9200/TCP, 9300/TCP
    Limits:
      memory: 8Gi
    Requests:
      memory: 512Mi
    State: Running
      Started: Thu, 22 Jun 2017 16:57:15 +0000
    Ready: True
    Restart Count: 0
    Volume Mounts:
      /elasticsearch/persistent from elasticsearch-storage (rw)
      /etc/elasticsearch/secret from elasticsearch (ro)
      /usr/share/java/elasticsearch/config from elasticsearch-config (ro)
      /var/run/secrets/kubernetes.io/serviceaccount from aggregated-logging-elasticsearch-token-mx6bh (ro)
    Environment Variables:
      NAMESPACE: logging (v1:metadata.namespace)
      KUBERNETES_TRUST_CERT: true
      SERVICE_DNS: logging-es-cluster
      CLUSTER_NAME: logging-es
      INSTANCE_RAM: 8Gi
      NODE_QUORUM: 1
      RECOVER_AFTER_NODES: 0
      RECOVER_EXPECTED_NODES: 1
      RECOVER_AFTER_TIME: 5m
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
Volumes:
  elasticsearch:
    Type: Secret (a volume populated by a Secret)
    SecretName: logging-elasticsearch
  elasticsearch-config:
    Type: ConfigMap (a volume populated by a ConfigMap)
    Name: logging-elasticsearch
  elasticsearch-storage:
    Type: EmptyDir (a temporary directory that shares a pod's lifetime)
    Medium:
  aggregated-logging-elasticsearch-token-mx6bh:
    Type: Secret (a volume populated by a Secret)
    SecretName: aggregated-logging-elasticsearch-token-mx6bh
QoS Class: Burstable
Tolerations: <none>
Events:
  FirstSeen LastSeen Count From SubObjectPath Type Reason Message
  --------- -------- ----- ---- ------------- ---- ------ -------
  55m 55m 1 {default-scheduler } Normal Scheduled Successfully assigned logging-es-7vo926zw-2-phjrq to viaq.logging.test
  55m 55m 1 {kubelet viaq.logging.test} spec.containers{elasticsearch} Normal Pulling pulling image "docker.io/openshift/origin-logging-elasticsearch:v1.5.1"
  51m 51m 1 {kubelet viaq.logging.test} spec.containers{elasticsearch} Normal Pulled Successfully pulled image "docker.io/openshift/origin-logging-elasticsearch:v1.5.1"
  51m 51m 1 {kubelet viaq.logging.test} spec.containers{elasticsearch} Normal Created Created container with docker id 4b43ba3693fd; Security:[seccomp=unconfined]
  51m 51m 1 {kubelet viaq.logging.test} spec.containers{elasticsearch} Normal Started Started container with docker id 4b43ba3693fd
[root@viaq openshift-ansible]#
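Note: elasticsearch-storage is an EmptyDir volume, so all indexed data lives only as long as this pod; a reschedule or redeploy wipes it. That is acceptable on a throwaway test VM, but anything that should persist needs a PVC in the deployment config instead. A sketch (the claim name logging-es-1 is hypothetical and the PVC must already exist):

# Replace the EmptyDir with a persistent volume claim
oc volume dc/logging-es-7vo926zw --add --overwrite \
  --name=elasticsearch-storage -t persistentVolumeClaim --claim-name=logging-es-1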
[root@viaq openshift-ansible]# oc logs -f logging-fluentd-024kj
2017-06-22 16:47:40 +0000 [info]: reading config file path="/etc/fluent/fluent.conf"
2017-06-22 16:48:35 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:48:00 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:58:in `client'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:197:in `rescue in send_bulk'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:192:in `send_bulk'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:185:in `block in write_objects'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:184:in `each'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:184:in `write_objects'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/output.rb:490:in `write'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/buffer.rb:354:in `write_chunk'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/buffer.rb:333:in `pop'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/output.rb:342:in `try_flush'
2017-06-22 16:48:35 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/output.rb:149:in `run'
2017-06-22 16:49:11 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:48:03 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:49:11 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:49:48 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:48:07 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:49:48 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:50:24 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:48:16 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:50:24 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:51:01 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:48:31 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:51:01 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:51:37 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:49:01 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:51:37 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:52:07 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:51:34 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:58:in `client'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:197:in `rescue in send_bulk'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:192:in `send_bulk'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:185:in `block in write_objects'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:184:in `each'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluent-plugin-elasticsearch-1.9.3/lib/fluent/plugin/out_elasticsearch_dynamic.rb:184:in `write_objects'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/output.rb:490:in `write'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/buffer.rb:354:in `write_chunk'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/buffer.rb:333:in `pop'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/output.rb:342:in `try_flush'
2017-06-22 16:52:07 +0000 [warn]: /opt/app-root/src/gems/fluentd-0.12.31/lib/fluent/output.rb:149:in `run'
2017-06-22 16:52:13 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:49:58 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:52:13 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:52:45 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:51:36 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:52:45 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:52:54 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:51:53 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:52:54 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:53:23 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:51:40 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:53:23 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:53:29 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:55:42 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:53:29 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:53:59 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:51:49 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:53:59 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:54:35 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:52:06 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:54:35 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:55:11 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:52:39 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:55:11 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:55:47 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:53:41 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:55:47 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:56:17 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 17:00:42 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:f92414"
2017-06-22 16:56:17 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:56:23 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 16:55:52 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:56:23 +0000 [warn]: suppressed same stacktrace
2017-06-22 16:57:00 +0000 [warn]: temporarily failed to flush the buffer. next_retry=2017-06-22 17:00:00 +0000 error_class="Fluent::ElasticsearchOutput::ConnectionFailure" error="Can not reach Elasticsearch cluster ({:host=>\"logging-es\", :port=>9200, :scheme=>\"https\", :user=>\"fluentd\", :password=>\"obfuscated\"})!" plugin_id="object:11f32a8"
2017-06-22 16:57:00 +0000 [warn]: suppressed same stacktrace
2017-06-22 17:00:01 +0000 [warn]: retry succeeded. plugin_id="object:11f32a8"
2017-06-22 17:00:44 +0000 [warn]: retry succeeded. plugin_id="object:f92414"
2017-06-22 17:01:56 +0000 [warn]: buffer flush took longer time than slow_flush_log_threshold: plugin_id="object:f92414" elapsed_time=72.298514849 slow_flush_log_threshold=20.0
^C
[root@viaq openshift-ansible]#
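Note: the fluentd connection failures run from 16:48 to 16:57, which matches the window in which Elasticsearch was restarting, and both output buffers flushed successfully by 17:00 ("retry succeeded"), so no log data should have been lost. To confirm records are actually landing, the indices can be listed through the same authenticated endpoint (a sketch; admin-cert and admin-key are assumed file names, as before):

oc exec logging-es-7vo926zw-2-phjrq -- curl -s \
  --cacert /etc/elasticsearch/secret/admin-ca \
  --cert /etc/elasticsearch/secret/admin-cert \
  --key /etc/elasticsearch/secret/admin-key \
  "https://localhost:9200/_cat/indices?v"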
[root@viaq openshift-ansible]# oc describe pods logging-fluentd-024kj|more
Name: logging-fluentd-024kj
Namespace: logging
Security Policy: privileged
Node: viaq.logging.test/172.16.93.5
Start Time: Thu, 22 Jun 2017 13:05:25 +0000
Labels: component=fluentd
        logging-infra=fluentd
        provider=openshift
Status: Running
IP: 10.128.0.17
Controllers: DaemonSet/logging-fluentd
Containers:
  fluentd-elasticsearch:
    Container ID: docker://4b4a5b5cae4c152f494e199384c2801c98862345ef187119d0ae5fe8c24a9e47
    Image: docker.io/openshift/origin-logging-fluentd:v1.5.1
    Image ID: docker-pullable://docker.io/openshift/origin-logging-fluentd@sha256:8ec8dacbbb417d1b058f4964893dbbf6a88af1281bbd40d470ce91326fa27f93
    Port:
    Limits:
      cpu: 100m
      memory: 512Mi
    Requests:
      cpu: 100m
      memory: 512Mi
    State: Running
      Started: Thu, 22 Jun 2017 16:47:33 +0000
    Last State: Terminated
      Reason: Error
      Exit Code: 137
      Started: Thu, 22 Jun 2017 13:13:07 +0000
      Finished: Thu, 22 Jun 2017 15:59:53 +0000
    Ready: True
    Restart Count: 1
    Volume Mounts:
      /etc/docker-hostname from dockerhostname (ro)
      /etc/fluent/configs.d/user from config (ro)
      /etc/fluent/keys from certs (ro)
      /etc/localtime from localtime (ro)
      /etc/sysconfig/docker from dockercfg (ro)
      /run/log/journal from runlogjournal (rw)
      /var/lib/docker/containers from varlibdockercontainers (ro)
      /var/log from varlog (rw)
      /var/run/secrets/kubernetes.io/serviceaccount from aggregated-logging-fluentd-token-kzsfd (ro)
    Environment Variables:
      K8S_HOST_URL: https://kubernetes.default.svc.cluster.local
      ES_HOST: logging-es
      ES_PORT: 9200
      ES_CLIENT_CERT: /etc/fluent/keys/cert
      ES_CLIENT_KEY: /etc/fluent/keys/key
      ES_CA: /etc/fluent/keys/ca
      OPS_HOST: logging-es
      OPS_PORT: 9200
      OPS_CLIENT_CERT: /etc/fluent/keys/cert
      OPS_CLIENT_KEY: /etc/fluent/keys/key
      OPS_CA: /etc/fluent/keys/ca
      ES_COPY: false
      ES_COPY_HOST:
      ES_COPY_PORT:
      ES_COPY_SCHEME: https
      ES_COPY_CLIENT_CERT:
      ES_COPY_CLIENT_KEY:
      ES_COPY_CA:
      ES_COPY_USERNAME:
      ES_COPY_PASSWORD:
      OPS_COPY_HOST:
      OPS_COPY_PORT:
      OPS_COPY_SCHEME: https
      OPS_COPY_CLIENT_CERT:
      OPS_COPY_CLIENT_KEY:
      OPS_COPY_CA:
      OPS_COPY_USERNAME:
      OPS_COPY_PASSWORD:
      USE_JOURNAL:
      JOURNAL_SOURCE:
      JOURNAL_READ_FROM_HEAD:
Conditions:
  Type          Status
  Initialized   True
  Ready         True
  PodScheduled  True
Volumes:
  runlogjournal:
    Type: HostPath (bare host directory volume)
    Path: /run/log/journal
  varlog:
    Type: HostPath (bare host directory volume)
    Path: /var/log
  varlibdockercontainers:
    Type: HostPath (bare host directory volume)
    Path: /var/lib/docker/containers
  config:
    Type: ConfigMap (a volume populated by a ConfigMap)
    Name: logging-fluentd
  certs:
    Type: Secret (a volume populated by a Secret)
    SecretName: logging-fluentd
  dockerhostname:
    Type: HostPath (bare host directory volume)
    Path: /etc/hostname
  localtime:
    Type: HostPath (bare host directory volume)
    Path: /etc/localtime
  dockercfg:
    Type: HostPath (bare host directory volume)
    Path: /etc/sysconfig/docker
  aggregated-logging-fluentd-token-kzsfd:
    Type: Secret (a volume populated by a Secret)
    SecretName: aggregated-logging-fluentd-token-kzsfd
QoS Class: Guaranteed
Tolerations: <none>
Events:
  FirstSeen LastSeen Count From SubObjectPath Type Reason Message
  --------- -------- ----- ---- ------------- ---- ------ -------
  1h 1h 4 {kubelet viaq.logging.test} Warning FailedSync Error syncing pod, skipping: network is not ready: [SDN pod network is not ready]
  1h 1h 1 {kubelet viaq.logging.test} spec.containers{fluentd-elasticsearch} Normal Pulling pulling image "docker.io/openshift/origin-logging-fluentd:v1.5.1"
  1h 1h 1 {kubelet viaq.logging.test} spec.containers{fluentd-elasticsearch} Normal Pulled Successfully pulled image "docker.io/openshift/origin-logging-fluentd:v1.5.1"
  1h 1h 1 {kubelet viaq.logging.test} spec.containers{fluentd-elasticsearch} Normal Created Created container with docker id 4b4a5b5cae4c; Security:[seccomp=unconfined]
  1h 1h 1 {kubelet viaq.logging.test} spec.containers{fluentd-elasticsearch} Normal Started Started container with docker id 4b4a5b5cae4c
[root@viaq openshift-ansible]#
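Note: the previous fluentd container exited with code 137, i.e. it was killed by SIGKILL rather than crashing on its own. With the memory limit equal to the request (512Mi, Guaranteed QoS), an OOM kill is one plausible cause; the kernel log on the node can confirm or rule that out (a sketch, run on the node itself):

# Look for OOM-killer activity around 15:59 when the container was terminated
dmesg | grep -i -E 'oom|killed process'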
[root@viaq openshift-ansible]# docker images|grep logging
docker.io/openshift/origin-logging-curator         v1.5.1   27a20151da17   6 weeks ago   224.9 MB
docker.io/openshift/origin-logging-auth-proxy      v1.5.1   6bca7a2e8641   6 weeks ago   715.5 MB
docker.io/openshift/origin-logging-kibana          v1.5.1   81f8b0aaea10   6 weeks ago   682.9 MB
docker.io/openshift/origin-logging-fluentd         v1.5.1   b7012a18fd69   8 weeks ago   358.8 MB
docker.io/openshift/origin-logging-elasticsearch   v1.5.1   033ed6e1c620   8 weeks ago   429.9 MB
[root@viaq openshift-ansible]#
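Note: all five v1.5.1 logging images are now cached on the node, so the earlier ErrImagePull events ("Tag v1.5.1 not found", "net/http: request canceled") were transient registry/network failures rather than a genuinely missing tag; later pulls succeeded, as the Pulled events confirm. Pre-pulling an image that fails intermittently avoids the back-off delays during a rollout (a sketch):

# Pre-pull the image that was failing intermittently
docker pull docker.io/openshift/origin-logging-curator:v1.5.1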