I hereby claim:
- I am bitmonk on github.
- I am bitmonk (https://keybase.io/bitmonk) on keybase.
- I have a public key ASAwx-1Gl-tFgwuegKfQNTm2cbubnOUdISF1Ea1dzEfn9wo
To claim this, I am signing this object:
Traceback (most recent call last):
  File "/etc/zabbix/alert.d//pagerduty", line 239, in <module>
    pagerduty_queue.enqueue(Zabbix(sys.argv).event())
  File "/etc/zabbix/alert.d//pagerduty", line 213, in event
    event = self._parse_zabbix_body(self.arguments[3])
  File "/etc/zabbix/alert.d//pagerduty", line 197, in _parse_zabbix_body
    return dict(line.strip().split(':', 1) for line in body_str.strip().split('\n'))
ValueError: dictionary update sequence element #4 has length 1; 2 is required
I hereby claim:
To claim this, I am signing this object:
zk01:~$ HADOOP_USER_NAME=hdfs hdfs balancer
16/02/19 17:59:44 INFO balancer.Balancer: namenodes  = [hdfs://zk01.aur.test.ziprealty.com]
16/02/19 17:59:44 INFO balancer.Balancer: p          = Balancer.Parameters[BalancingPolicy.Node, threshold=10.0, number of datanodes=0, exclude=true]
Time Stamp               Iteration#  Bytes Already Moved  Bytes Left To Move  Bytes Being Moved
16/02/19 17:59:44 INFO net.NetworkTopology: Adding a new node: /default-rack/10.100.6.248:50010
16/02/19 17:59:44 INFO net.NetworkTopology: Adding a new node: /default-rack/10.100.6.245:50010
16/02/19 17:59:44 INFO net.NetworkTopology: Adding a new node: /default-rack/10.100.6.250:50010
16/02/19 17:59:44 INFO net.NetworkTopology: Adding a new node: /default-rack/10.100.6.241:50010
16/02/19 17:59:44 INFO net.NetworkTopology: Adding a new node: /default-rack/10.100.6.242:50010
16/02/19 17:59:44 INFO net.NetworkTopology: Adding a new node: /default-rack/10.100.6.243:50010
16/02/19 17:55:59 WARN balancer.Balancer: Failed to move blk_1073741949_1125 with size=45000000 from 10.100.6.243:50010 to 10.100.6.241:50010 through 10.100.6.243:50010: block move is failed: Not able to receive block 1073741949 from /10.100.6.251:39734 because threads quota is exceeded.
16/02/19 17:55:59 WARN balancer.Balancer: Failed to move blk_1073741952_1128 with size=45000000 from 10.100.6.243:50010 to 10.100.6.241:50010 through 10.100.6.243:50010: block move is failed: Not able to receive block 1073741952 from /10.100.6.251:39733 because threads quota is exceeded.
16/02/19 17:55:59 WARN balancer.Balancer: Failed to move blk_1073741932_1108 with size=17865452 from 10.100.6.243:50010 to 10.100.6.242:50010 through 10.100.6.250:50010: block move is failed: Not able to receive block 1073741932 from /10.100.6.251:46936 because threads quota is exceeded.
16/02/19 17:55:59 WARN balancer.Balancer: Failed to move blk_1073741947_1123 with size=45000000 from 10.100.6.243:50010 to 10.100.6.241:50010 through 10.100.
# Deploy the checksecurity configuration from this cookbook's files/ directory.
# Owned by root and mode 0600: the file is read only by root-run cron jobs.
cookbook_file '/etc/checksecurity.conf' do
  source 'checksecurity.conf'
  # Quoted string mode per Chef convention: a bare integer literal loses its
  # leading zero unless written in octal, which is an easy mistake to make.
  mode '0600'
  owner 'root'
  group 'root'
end
knife ec2 server create -I ami-3fec7956 -f m1.small -x ubuntu -S justizin --run-list=role[base] -N cheftest-08 |
Traceback (most recent call last):
  File "/etc/zabbix/alert.d//pagerduty", line 242, in <module>
    pagerduty_queue.enqueue(Zabbix(sys.argv).event())
  File "/etc/zabbix/alert.d//pagerduty", line 222, in event
    incident_key = "%s-%s" % (event["id"], event["hostname"])
TypeError: 'NoneType' object has no attribute '__getitem__'
2013-01-24_01:31:15.65113 udp_recv_channel mcast_join=NULL mcast_if=NULL port=8649 bind=NULL
2013-01-24_01:31:15.65116 tcp_accept_channel bind=NULL port=8649
2013-01-24_01:31:15.65117 udp_send_channel mcast_join=NULL mcast_if=NULL host=10.240.227.155 port=8649
{ | |
"AWSTemplateFormatVersion": "2010-09-09", | |
"Description": "CoreOS on EC2 (Hipmunk): http://coreos.com/docs/running-coreos/cloud-providers/ec2/", | |
"Mappings" : { | |
"RegionMap" : { | |
"ap-northeast-1" : { | |
"AMI" : "ami-1fb9e61e" | |
}, |
# schema creation for Telephus (Twisted Python Cassandra client) example code
# feed into cassandra-cli
# connect to a local node on the default Thrift port
connect localhost/9160;
create keyspace Keyspace1;
use Keyspace1;
# Standard1: column family whose column NAMES are compared/sorted as UTF-8
create column family Standard1 with comparator = UTF8Type;
# declare per-column metadata so values written under 'foo' are validated as UTF-8
update column family Standard1 with column_metadata = [
    {column_name: foo, validation_class: UTF8Type}];