Skip to content

Instantly share code, notes, and snippets.

View aryulianto's full-sized avatar
😶‍🌫️
behind your eyes

Saputro Aryulianto aryulianto

😶‍🌫️
behind your eyes
View GitHub Profile
#!/usr/bin/gawk -f
#
# Usage: ./rabbit-tell.awk <rabbitmqctl_report>
#
# Common RabbitMQ issues that this script can be used to identify
#
# - Partitioned cluster
#
# - High binary/queue memory usage. There are mainly two causes of this:
#
# Create the external (provider) network "extnet":
#  --external                      marks it as a router:external network,
#  --provider-network-type flat    untagged traffic (no VLAN/VXLAN segmentation),
#  --provider-physical-network     maps it to the "physnet1" bridge mapping
#                                  defined in the neutron agent config.
openstack network create \
--external \
--provider-network-type flat \
--provider-physical-network physnet1 \
extnet
openstack subnet create \
--subnet-range 192.168.0.0/24 \
--allocation-pool start=192.168.0.123,end=192.168.0.250 \
--network extnet \
"os_compute_api:os-evacuate": "rule:admin_api"
"os_compute_api:servers:create": "rule:admin_or_owner"
"os_compute_api:os-extended-volumes": "rule:admin_or_owner"
"os_compute_api:servers:create:forced_host": "rule:admin_api"
"os_compute_api:os-aggregates:remove_host": "rule:admin_api"
"os_compute_api:os-console-output": "rule:admin_or_owner"
"os_compute_api:os-floating-ips": "rule:admin_or_owner"
"os_compute_api:os-aggregates:update": "rule:admin_api"
"os_compute_api:server-metadata:show": "rule:admin_or_owner"
"os_compute_api:os-flavor-manage:create": "rule:os_compute_api:os-flavor-manage"
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": "-- Grafana --",
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
@aryulianto
aryulianto / remove_osd
Created September 27, 2018 11:26
Ceph: properly remove an OSD
# Ceph: properly remove an OSD from the cluster.
# Replace <ID> with the numeric OSD id (osd.3 -> ID is 3).
# Wait for the cluster to finish rebalancing (HEALTH_OK) after the
# reweight/out steps before proceeding with the destructive ones.

# Inspect the CRUSH tree and locate the OSD to be removed.
ceph osd tree
# Drain data off the OSD by dropping its CRUSH weight to zero.
ceph osd crush reweight osd.<ID> 0.0
# Mark the OSD "out" so it receives no new placement groups.
ceph osd out <ID>
# Stop the OSD daemon (sysvinit-style service management).
service ceph stop osd.<ID>
# Remove the OSD from the CRUSH map.
ceph osd crush remove osd.<ID>
# Delete the OSD's cephx authentication key.
ceph auth del osd.<ID>
# Finally remove the OSD from the cluster map.
ceph osd rm <ID>
#!/bin/bash
PS3='Mau SSH kemana bro ? : '
options=("SSH ke master" "SSH node" "Gak jadi")
select opt in "${options[@]}"
do
case $opt in
"SSH ke master")
/usr/bin/ssh -i /home/satrianugroho/key-satrianugroho.pem centos@10.1.1.37
break
#!/bin/bash
# Clone Ceph lab VMs from a CentOS 7 template and attach two extra
# 20G data disks to the OSD node (podX-ceph0).
#
# Fixes vs. original:
#  - shebang was "#/bin/bash" (missing '!'), so it was a no-op comment
#  - 'qemu-img create' takes an absolute size; the '+SIZE' form is
#    'qemu-img resize' syntax and is rejected by 'create'

# Monitor node: plain clone of the template, then boot it.
virt-clone --original template-centos7 --name podX-ceph-mon --auto-clone
virsh start podX-ceph-mon

# OSD node: clone, then create and attach two data disks.
virt-clone --original template-centos7 --name podX-ceph0 --auto-clone

# NOTE(review): files are named .qcow2 but created with '-f raw' —
# confirm the intended image format; kept as raw to preserve behavior.
qemu-img create -f raw /data/vms/podX-ceph0-vdb.qcow2 20G
qemu-img create -f raw /data/vms/podX-ceph0-vdc.qcow2 20G

# Attach the disks persistently (survives domain restart).
virsh attach-disk podX-ceph0 --source /data/vms/podX-ceph0-vdb.qcow2 --target vdb --persistent
virsh attach-disk podX-ceph0 --source /data/vms/podX-ceph0-vdc.qcow2 --target vdc --persistent
{
"nodes": [
{
"mac": [
"08:94:ef:4b:2c:82"
],
"name": "controller0",
"pm_user": "USERID",
"pm_addr": "10.11.0.244",
"pm_password": "PASSW0RD",
# Warning! Dangerous step! Destroys VMs
# Forcibly stops and undefines every libvirt domain whose name contains
# "instance-" (i.e. all Nova-managed guests on this hypervisor).
# Fix vs. original: quote "$x" so the expansions are word-split-safe
# (SC2086); libvirt domain names are safe today, but quoting costs nothing.
for x in $(virsh list --all | grep instance- | awk '{print $2}') ; do
  virsh destroy "$x"
  virsh undefine "$x"
done
# Warning! Dangerous step! Removes lots of packages
yum remove -y nrpe "*nagios*" puppet "*ntp*" "*openstack*" \
"*nova*" "*keystone*" "*glance*" "*cinder*" "*swift*" \
"*mariadb*" mysql mysql-server httpd "*memcache*" scsi-target-utils \
ceph pg dump | awk '
/^pg_stat/ { col=1; while($col!="up") {col++}; col++ }
/^[0-9a-f]+\.[0-9a-f]+/ { match($0,/^[0-9a-f]+/); pool=substr($0, RSTART, RLENGTH); poollist[pool]=0;
up=$col; i=0; RSTART=0; RLENGTH=0; delete osds; while(match(up,/[0-9]+/)>0) { osds[++i]=substr(up,RSTART,RLENGTH); up = substr(up, RSTART+RLENGTH) }
for(i in osds) {array[osds[i],pool]++; osdlist[osds[i]];}
}
END {
printf("\n");
printf("pool :\t"); for (i in poollist) printf("%s\t",i); printf("| SUM \n");
for (i in poollist) printf("--------"); printf("----------------\n");