Skip to content

Instantly share code, notes, and snippets.

@WeslyG
Last active December 25, 2021 15:31
Show Gist options
  • Save WeslyG/b37cd968d09ea2e361c257e5da8d2a75 to your computer and use it in GitHub Desktop.
Save WeslyG/b37cd968d09ea2e361c257e5da8d2a75 to your computer and use it in GitHub Desktop.
Shell Usage
# roles when
roles:
- { role: main, when: inventory_file|basename == 'inventory' }
# WSL connection bug
# https://stackoverflow.com/questions/62363901/ansible-msg-failed-to-create-temporary-directory-in-some-cases-fatal
# Back up the real sleep binary and replace it with a no-op symlink.
# Guard against re-runs: without the -L check, running this twice would
# overwrite the sleep.dist backup with the /bin/true symlink, losing the
# original binary. Restore with: mv /usr/bin/sleep.dist /usr/bin/sleep
if [ ! -L /usr/bin/sleep ]; then
  mv /usr/bin/sleep /usr/bin/sleep.dist
  ln -s /bin/true /usr/bin/sleep
fi
# Run locally
ansible localhost -m shell -a"echo 'hey you'"
#Sum counter
cat myfile.log | awk '{ SUM += $2} END { print "Counter " SUM }'
#Print column 3 for rows where column 7 is 0
awk '{ if ( $7 == 0 ) print $3 }'
# test write speed: 512-byte blocks x 100000 writes (direct I/O)
dd if=/dev/zero of=/tmp/testfile2 bs=512 count=100000 oflag=direct
dd if=/dev/zero of=/tmp/testfile2 bs=4GB count=1 oflag=direct
cat /sys/block/sda/queue/rotational
# Recovery speed relocation (default 250mb)
curl -XPUT localhost:9200/_cluster/settings -H 'Content-Type: application/json' -d '{"transient" :{"indices.recovery.max_bytes_per_sec": "1024mb"}}'
# Allocation
curl localhost:9200/_cluster/allocation/explain
# watermark
curl -XPUT 'localhost:9200/_cluster/settings' -H 'Content-Type: application/json' -d '{"transient": {
"cluster.routing.allocation.disk.watermark.low": "96%",
"cluster.routing.allocation.disk.watermark.high": "97%",
"cluster.routing.allocation.disk.watermark.flood_stage": "98%"}}'
# Retry failure
curl -XPOST localhost:9200/_cluster/reroute?retry_failed
# Rebalance count
curl -XPUT localhost:9200/_cluster/settings -H 'Content-Type: application/json' -d '{"transient" :{"cluster.routing.allocation.cluster_concurrent_rebalance" : "20"}}';
# disable lock
curl -XPUT "http://localhost:9200/*/_settings" -H 'Content-Type: application/json' -d '{"index": {"blocks": {"read_only_allow_delete": "false"}}}'
# Rebalance shards - https://www.elastic.co/guide/en/elasticsearch/reference/6.8/shards-allocation.html
curl -XPUT localhost:9200/_cluster/settings?master_timeout=120s -H 'Content-Type: application/json' -d '{"transient": { "cluster.routing.allocation.balance.shard": "0.65f"}}'
#remove node from cluster
curl -XPUT localhost:9200/_cluster/settings -H 'Content-Type: application/json' -d '{"transient" :{"cluster.routing.allocation.exclude.name" : "node-name"}}';
# disable remove
curl -XPUT localhost:9200/_cluster/settings -H 'Content-Type: application/json' -d '{"transient" :{"cluster.routing.allocation.exclude.name" : null}}';
# change reroute shards count (default 2)
curl -XPUT localhost:9200/_cluster/settings -H 'Content-Type: application/json' -d '{"transient" :{"cluster.routing.allocation.node_concurrent_recoveries" : "50"}}';
# change watermark
curl -XPUT 'localhost:9200/_cluster/settings' -d '{"transient": {"cluster.routing.allocation.disk.watermark.low": "98%"}}'
# change replica
curl -XPUT localhost:9200/INDEX/_settings -H 'Content-Type: application/json' -d '{"index" : {"number_of_replicas" : 0}}'
POST /_aliases
{
"actions" : [
{ "add": {
"index": "forms-rpn3-forms.api.check-000005",
"alias": "forms-rpn3-forms.api.check",
"is_write_index" : true
} }
]
}
POST _ilm/move/test-000022?master_timeout=120s
{
"current_step": {
"phase": "hot",
"action": "rollover",
"name": "ERROR"
},
"next_step": {
"phase": "warm",
"action": "set_priority",
"name": "set_priority"
}
}
# force reroute
curl -XPOST 'localhost:9200/_cluster/reroute' -H 'Content-Type: application/json' -d '{"commands" : [{"move" : { "index" : "INDEX_NAME", "shard" : 0, "from_node" : "FROM_NODE_NAME", "to_node" : "TO_NODE_NAME"}}]}'
# config new_primary only
curl -XPUT localhost:9200/_cluster/settings?master_timeout=120s -H 'Content-Type: application/json' -d '{"transient": { "cluster.routing.allocation.enable": "new_primaries"}}'
curl -XPUT localhost:9200/_cluster/settings?master_timeout=120s -H 'Content-Type: application/json' -d '{"transient": { "cluster.routing.allocation.enable": "all"}}'
# Run Elasticsearch in Docker
docker run -p 9200:9200 -p 9300:9300 -e "discovery.type=single-node" docker.elastic.co/elasticsearch/elasticsearch:7.0.1
# fix ilm
curl -XPUT "http://localhost:9200/myindex-000002/_settings" -H 'Content-Type: application/json' -d' { "index.lifecycle.rollover_alias": "myindex" }'
# create mapping
PUT indices/_mapping/type
{
"properties": {
"email": {
"type": "keyword"
}
}
}
# easy search
GET /indices/_search
{
"query": {
"match" : {
"field_for_search" : "what you search"
}
}
}
# sql
POST /_xpack/sql?format=txt
{
"query": """SELECT * FROM "devops*" WHERE "cluster" = 'production' AND "status" = 'open' ORDER BY all_store_size_byte DESC""",
"fetch_size": 100
}
# write one document
curl -X POST http://localhost:9200/indexname/type -H 'Content-Type: application/json' -d '{"message": "youre message here"}'
# write bulk
POST
curl -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@data"
the data file contains:
{ "index" : { "_index": "indexname" } }
{ "field1" : "value1" }
{ "index" : { "_index": "indexname" } }
{ "field1" : "value2" }
curl -XPOST localhost:9200/_cluster/reroute?pretty -H 'Content-Type: application/json' -d '
{
"commands": [
{
"allocate_empty_primary": {
"index": "nginx-actions-2018.07.24",
"shard": 2,
"node": "nodeId",
"accept_data_loss": true
}
}
]
}'
# ILM
# ILM bootstrap: create an index template wired to an ILM rollover alias,
# then create the first concrete index (-000001) marked as the write index.
# Usage: ./script.sh <index_name>
# Fix: fail fast with a usage message instead of silently creating a
# template/index with an empty name when $1 is missing.
index_name=${1:?usage: $0 <index_name>}

# Template matches "<name>-*" and points ILM's rollover at the bare alias.
# NOTE(review): "order" is set to the length of the name (${#index_name}),
# presumably so longer (more specific) names win template precedence — confirm.
curl -XPUT "http://localhost:9200/_template/${index_name}" -H 'Content-Type: application/json' -d"
{
  \"index_patterns\": \"${index_name}-*\",
  \"order\": \"${#index_name}\",
  \"settings\": {
    \"index.lifecycle.rollover_alias\": \"${index_name}\"
  }
}"
echo " "
# First concrete index; is_write_index lets ILM roll writes through the alias.
curl -XPUT "localhost:9200/${index_name}-000001?pretty" -H 'Content-Type: application/json' -d"
{
  \"aliases\": {
    \"${index_name}\": {
      \"is_write_index\": true
    }
  }
}"
echo "$1 - created"
curl -X POST "localhost:9200/my_index/_ilm/remove?pretty"
or
# add new
firewall-cmd --permanent --add-port=5059-5061/udp
firewall-cmd --permanent --add-port=3000/tcp
# reload
firewall-cmd --reload
# list all
firewall-cmd --list-all
# remove port
firewall-cmd --permanent --remove-port=3000/udp
# remove rich
firewall-cmd --permanent --remove-rich-rule 'all rich rule here'
with open('data.txt') as json_file:
data = json.load(json_file)
yum list installed | grep
yum list | grep
yum install java-11-openjdk-devel-11.0.8*
yum localinstall qwe.rpm
yum search --showduplicates java
apt list
apt list --installed
apt install java-11-openjdk-devel=11.0.8*
dpkg -i /path/to/deb/file
apt-get install -f
#Negative grep (everything except the match)
cat test | grep -v mytext
#RegExp grep
grep '^foo$' filename
#How to get files on hot
curl localhost:9200/* | jq 'map(select(.settings.index.routing.allocation.require.box_type == "hot")) | map(.settings.index.provided_name)' > file
# fix
map(select(.ilm.phase_execution.phase_definition.actions.allocate.require.box_type == "cold")) | map ({ size: .size, name: .name})
# play jq
https://jqplay.org/#
https://kubernetes.io/ru/docs/reference/kubectl/cheatsheet/
# enable api (default port 8001)
kubectl proxy
# get all events
kubectl get events
# remove all events
kubectl delete events --all
# get pods
kubectl get pods
kubectl get namespaces
# port forward
kubectl port-forward pods/{podsName} 8080:8080
# set-context
kubectl config use-context context-name
# run busybox and delete on exit
kubectl run -i --tty busybox --image=busybox --restart=Never --rm -- sh
# port forward
kubectl port-forward rs/redis-master 7000:6379
# log delete containers
kubectl logs mypod --previous
# Run wget + ping (no curl)
kubectl run -i --tty --rm debug --image=busybox --restart=Never -- sh
# Scale
kubectl scale --replicas=3 rs/foo # Scale replicaset 'foo' to 3 replicas
kubectl scale --replicas=3 -f foo.yaml # Scale the resource described in "foo.yaml" to 3
kubectl scale --current-replicas=2 --replicas=3 deployment/mysql # Scale mysql to 3 only if it currently has exactly 2 replicas
kubectl scale --replicas=5 rc/foo rc/bar rc/baz # Scale several replication controllers at once
mongodump --host <host-ip> --port 27017 --db <database> --authenticationDatabase admin --username <username> --password <password> --out ./Documents/
mongodump --uri="mongodb://host:27017,host2:27017,host3:27017/?replicaSet=dvops_tree&readPreference=secondary" --username userrw --db 'backupDbName' --authenticationDatabase admin --password 'password' --out ./out
sudo mysqldump -u root -h 127.0.0.1 -p --compatible=postgresql databaseName > file.sql
mysql billing < billing.sql
CREATE USER 'monty'@'localhost' IDENTIFIED BY 'some_pass';
GRANT ALL PRIVILEGES ON *.* TO 'monty'@'localhost' WITH GRANT OPTION;
CREATE USER 'monty'@'%' IDENTIFIED BY 'some_pass';
GRANT ALL PRIVILEGES ON *.* TO 'monty'@'%' WITH GRANT OPTION;
FLUSH PRIVILEGES;
pgloader mysql://remote:password@127.0.0.1/billing postgresql://postgres:qweqweqwe@127.0.0.1:5432/billing
# chat
# on first host
nc -lp 3000
# on second host
nc 10.0.0.1 3000
# who listen port
netstat -tpln | grep :80
# get all ports open on localhost (tcp + udp)
nmap localhost
# check ip on fqdn
nmap 10.0.0.1
nmap super.myexample.com
# UDP port
nmap -sU -p port target
nmap -sU -p 8080 10.0.0.1
# TCP port
nmap -sT -p 8080 10.0.0.1
#check all
nmap -sU 10.0.0.1 -p 1-65535
apt install postgresql
service postgresql status
systemctl status postgresql
# connect from external
nano /etc/postgresql/12/main/pg_hba.conf
```
host all all 0.0.0.0/0 md5
```
nano /etc/postgresql/12/main/postgresql.conf
listen_addresses = '0.0.0.0'
systemctl restart postgresql
su postgres
psql
ALTER USER postgres PASSWORD 'qweqweqwe';
local all postgres md5
systemctl restart postgresql
psql -U postgres
set statement_timeout = '99999 s';
#!/usr/bin/python
# -*- coding: utf-8 -*-
# sed
sed -r 's/-201*.*.*//g'
#Basic usage
cat file | sort
#Sort by column 3
cat file | sort -k 3
#Sort by column 3, numerically
# (bug fix: original read "cat file sort -kn 3" — the pipe was missing and
#  "-kn 3" is invalid: -k consumes "n" as the key; correct form is -nk 3)
cat file | sort -nk 3
#Sort by column 5, numeric, descending
cat file | sort -nrk 5
!! Ssh port forward !!
local port <==== remote port
ssh -N 192.168.1.1 -L 8080:localhost:80
TAR option
-c = create
-f = read to/from the named file (instead of the device /tape)
-t = list contents of .tar file
-r = append to a .tar file
-v = verbose (tells you everything its doing)
-x = extract contents of .tar file
-z = compress files (not so useful for minc files)
#File in tar
tar -cvf output.tar /dirname1 /dirname2 filename1 filename2
#Create tar.gz
tar -zcvf archive-name.tar.gz directory-name
#Extract tar.gz
tar -zxvf backups.tar.gz
# all on port 2003
tcpdump -Anni any dst port 2003
#run (all listen)
tcpdump
#eth0
tcpdump -i eth0
#logical
and or
tcpdump -n "src host 192.168.1.1 or src port 9003"
#dns
tcpdump src host 127.0.0.1 and port 53
#ssh
tcpdump src host 127.0.0.1 and port 22
#how to grep?
flag -l
tcpdump -l | awk '{ print $3}' | uniq
tcpdump | grep 127.0.0.1
# dst src (host port)
tcpdump src host/port
tcpdump dst host/port
tcpdump dst port 8080
tcpdump dst host 10.0.0.1
#flags
-c = count
tcpdump -c 100
-v -vv -vvv = verbose
tcpdump -v
-n = ip instead of names
tcpdump -n
-w write to file
tcpdump -w mytrafic.txt
tmux cheatsheet
As configured in my dotfiles.
start new:
tmux
start new with session name:
tmux new -s myname
attach:
tmux a # (or at, or attach)
attach to named:
tmux a -t myname
list sessions:
tmux ls
kill session:
tmux kill-session -t myname
In tmux, hit the prefix ctrl+b and then:
Sessions
:new<CR> new session
s list sessions
$ name session
Windows (tabs)
c new window
, name window
w list windows
f find window
& kill window
. move window - prompted for a new number
:movew<CR> move window to the next unused number
Panes (splits)
% horizontal split
" vertical split
o swap panes
q show pane numbers
x kill pane
⍽ space - toggle between layouts
Window/pane surgery
:joinp -s :2<CR> move window 2 into a new pane in the current window
:joinp -t :1<CR> move the current pane into a new pane in window 1
Move window to pane
How to reorder windows
Misc
d detach
t big clock
? list shortcuts
: prompt
# add new user
adduser username
useradd username
# dell user
userdel username
#add pass for user
passwd username
# add group to user
usermod -a -G sudo testuser
#remove user from group
gpasswd -d user group
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment