commands I forget
tmux
move window
:move-window -t INDEX
swap window
:swap-window -t INDEX
kill session
tmux kill-session -t mynames
cycle sessions
C-a (
save scrollback text into a file:
:capture-pane -S -3000
:save-buffer filename.txt
nav to specific index window
C-a '
C-a w
find + grep
find . -type f -exec grep -i "searchstring" '{}' \; -print
find + sed
find . -type f -name \*.js -exec sed -i "" "s/findById/queryById/g" '{}' \;
find . -type f -name \*_spec.js -exec sed -i "" "/Application.data.id_2_name = window.stubs.id_2_name\;/d" '{}' \;
rename files
find . -type f -name '*Foo*' | sed 'p;s/Foo/Bar/' | xargs -n2 mv # sed prints each old path then the new one; xargs -n2 pairs them for mv
find + perl (like sed, but doesn't futz with newlines at end of file)
find . -type f -exec perl -i -pe 's/gaq/foo/g' {} \;
# delete line
find . -type f -name '*.rb' -exec perl -i -pe "s/^.*track\!.*\n//g" {} \;
# replace CR with NL
find . -name file.html -exec perl -i -pe "s/\x0d/\x0a/g" {} \;
match and print with perl
cat extract-2021-10-28_16-10-32.csv | perl -ne '/(authorization_id=[0-9]+)/ && print "$1\n"' | sort | uniq
git grep + sed (faster find/replace; will create temp files, unfortunately)
for OSX
git grep -l 'rich_media_attributes' | xargs sed -i.bak 's/rich_media_attributes/rich_media_object_attributes/g'
git grep + perl (even faster, no temp files)
git grep -l enable_product | xargs perl -i -pe 's/enable_product/enable_benefit/'
bundler
bundle update --source gemname # conservatively update just a single gem
BUNDLE_GEMFILE=test/gemfiles/Gemfile.rails-3.0.x bundle exec rake test
create new gem
bundle gem my_gem
new gem with bin and rspec
bundle gem my_gem --bin --test=rspec
sed
split a single-line JSON into multiple lines, stupidly (BSD sed needs a literal newline in the replacement: type the backslash, hit enter, keep typing)
sed -e 's/}/}\
/g' singleline.json > lined.json
print just the matching part of a regex match
sed -n -e 's/^.*blahblah=\([0-9]*\).*/\1/p' # will print the number
android sdk
editing /etc/hosts
-- optional? emulator -partition-size 128 @MyEmulatedDevice
adb -e shell
# mount -o remount,rw -t yaffs2 /dev/block/mtdblock0 /system
# echo '10.0.2.2 cat-yodeling.local' >> /etc/hosts
node
export NODE_DEBUG=module # helps with debugging require('') issues
fix proxy npm issues
npm config set registry http://registry.npmjs.org/
installing node versions
sudo npm cache clean -f
sudo npm install -g n
sudo n stable
install dev package and save
npm install FOO --save-dev
show node package dependency tree
npm ls
wireshark
display filters are different from capture filters. you can't capture filter ssl (but you can capture filter port 443)
display filter http and ssl
http || ssl
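capture filter equivalent (capture filters use BPF syntax, so match the port rather than the protocol):
port 443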
show HTTP GETs and responses
http and (http.request.method matches "GET" or http.response.code == 200)
ufw
# order of rules matter
disable ufw
ufw disable
(re-)enable ufw
ufw enable
show open ports
ufw status
allow examples
ufw allow ssh
ufw allow www
ufw allow 443/tcp
ufw allow 3000:3500/tcp
ufw status numbered
ufw delete 7
ufw insert 1 deny in from xxx.xxx.xxx.xxx
sql/mysql
speed diff, dev vs prod
update taggings set taggable_type = 'Promotion' where taggable_type = 'Promotions';
dev: 2core, 2700.321 mhz, 3 G RAM
prod: 8core, 2500 mhz, 30 G RAM
Query OK, 2052 rows affected (0.09 sec) Rows matched: 2052 Changed: 2052 Warnings: 0
Query OK, 3485129 rows affected (1 min 31.96 sec)
naive comparison: the prod machine would have done the 2052-row update in about 0.05 sec
show table status (e.g. engine)
SHOW TABLE STATUS WHERE Name = 'xxx'
reset autoincr (innodb only allows it to be reset to a value >= current highest index on that table)
ALTER TABLE tablename AUTO_INCREMENT = 1
mysqldump
mysqldump -u username -p database_name > backup_db.sql
import gzipped mysql database
gunzip < database.sql.gz | mysql -u user -p database
zcat DB_File_Name.sql.gz | mysql -u username -p Target_DB_Name
create user
CREATE USER 'jeffrey'@'localhost' IDENTIFIED BY 'mypass';
granting privileges
GRANT SELECT ON database.* TO user@'localhost' IDENTIFIED BY 'password';
GRANT ALL on database.* to root@'localhost';
followed by 'FLUSH PRIVILEGES';
show charset
SHOW VARIABLES LIKE "%character%";
SELECT CCSA.character_set_name FROM information_schema.`TABLES` T, information_schema.`COLLATION_CHARACTER_SET_APPLICABILITY` CCSA WHERE CCSA.collation_name = T.table_collation AND T.table_name = "promotions";
SHOW FULL COLUMNS FROM tablename;
drop column
alter table tablename drop column columnname;
alter column
ALTER TABLE table_name MODIFY column_name datatype;
add column
ALTER TABLE table_name ADD email VARCHAR(255);
delete rows
DELETE FROM table_name WHERE condition;
SELECTS
count number of Xs for a given Y
SELECT users.id, COUNT(users.id) AS scusers FROM users LEFT JOIN sound_clouds ON users.id = sound_clouds.user_id GROUP BY users.id HAVING scusers > 1;
count number of Xs for a given Y
SELECT opener_id, COUNT(*) AS SUM FROM promotions GROUP BY opener_id HAVING SUM > 2;
count dupes of X while keeping Y distinct
SELECT sc_user_id, user_id, COUNT(*) AS num_sc_accounts FROM (SELECT DISTINCT user_id, sc_user_id, active FROM sound_clouds) AS SC WHERE active = true GROUP BY sc_user_id HAVING num_sc_accounts > 1;
SELECT DATEDIFF(U.last_login_at, U.created_at) AS dd, SC.sc_user_id, SC.user_id, COUNT(*) AS num_sc_accounts FROM (SELECT distinct user_id, sc_user_id, active FROM sound_clouds) AS SC LEFT JOIN users U ON (U.id = SC.user_id) where SC.active = true and U.active = true GROUP BY sc_user_id HAVING num_sc_accounts > 1 AND (dd < 10 OR dd IS NULL);
where foreign key does not exist, use sub-query
select id, promotion_approval_id from promotion_posts where promotion_approval_id not in (select id from promotion_approvals);
multi-table joins and selects
select p.id, p.opener_id, pn.network_id from promotions p join promotion_networks pn on p.id = pn.promotion_id join users on p.opener_id = users.id where users.subscribed = true order by id desc limit 5;
inline table
select * from (select 10 as id UNION select 11 as id UNION select 12 as id UNION select 13 as id) as t
group by + SUM certain columns given WHERE
SELECT distinct employer_employees.id AS employee_id,
SUM(mass_transit_records.suggested_election) as mass_transit_suggested_election,
SUM(mass_transit_records.deduction) as mass_transit_deduction,
SUM(mass_transit_records.reimbursement) as mass_transit_reimbursement,
SUM(parking_records.suggested_election) as parking_suggested_election,
SUM(parking_records.deduction) as parking_deduction,
SUM(parking_records.reimbursement) as parking_reimbursement
FROM employer_employees
JOIN (SELECT suggested_election, deduction, reimbursement, employee_id, pay_period_id FROM payroll_employee_pay_period_records WHERE pretax_category = 'mass_transit') AS mass_transit_records ON mass_transit_records.employee_id = employer_employees.id
JOIN (SELECT suggested_election, deduction, reimbursement, employee_id, pay_period_id FROM payroll_employee_pay_period_records WHERE pretax_category = 'parking') AS parking_records ON parking_records.pay_period_id = mass_transit_records.pay_period_id
GROUP BY (employer_employees.id, mass_transit_records.employee_id, parking_records.employee_id, mass_transit_records.pay_period_id, parking_records.pay_period_id)
HAVING mass_transit_records.pay_period_id = 96
ORDER BY employer_employees.id ASC limit 100;
create some test data
CREATE TABLE test_alter_charset (id int NOT NULL PRIMARY KEY AUTO_INCREMENT, message varchar(255)) CHARACTER SET utf8;
DROP PROCEDURE IF EXISTS prepare_data;
DELIMITER $$
CREATE PROCEDURE prepare_data()
BEGIN
DECLARE i INT DEFAULT 100;
WHILE i < 5000000 DO
INSERT INTO test_alter_charset (message) VALUES ("café");
SET i = i + 1;
END WHILE;
END$$
DELIMITER ;
CALL prepare_data();
show indexes
show indexes in table_name;
mysql profiling
set profiling=1; then, show profiles;
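drill into one query from the list (N is the Query_ID shown by show profiles):
show profile for query N;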
duplicate table
CREATE TABLE newtable LIKE oldtable; INSERT newtable SELECT * FROM oldtable;
postgres
show tables options
SELECT relname, reloptions FROM pg_class WHERE relname='tablename';
show db settings for autovacuum
select name, setting, unit, category, short_desc, source, min_val, max_val, boot_val from pg_settings where name like '%autovacuum%';
create user
commandline
createuser luke
from psql
CREATE USER name WITH SUPERUSER LOGIN;
alter user/role
ALTER ROLE name WITH NOLOGIN;
ALTER ROLE postgres WITH LOGIN;
add permissions
GRANT ALL ON DATABASE foo TO postgres;
create new db cluster
initdb -D /usr/local/pgsql/data
start postgres
postgres -D /usr/local/pgsql/data
pg_ctl -D /usr/local/pgsql/data -l logfile start
createdb -Oluke -Eutf8 foo_development
connect via client
psql -U luke foo_development
list tables, etc.
\d
\d users
list indexes
\di
list databases
\l
set schema to use from psql
SET SCHEMA 'schema_name';
show search path/schema
SHOW SEARCH_PATH;
delete database
dropdb dbname
stop pg
PGDATA=/usr/local/var/postgres pg_ctl stop (env var may not be necessary, if set)
PGDATA=/usr/local/var/postgres pg_ctl stop -m fast (force connections to be disrupted)
unpivot (UNPIVOT is Oracle syntax; postgres sketch below)
SELECT * FROM (SELECT COUNT(CASE WHEN sal < 10000 THEN 1 END) AS test1, COUNT(CASE WHEN dept > 10 THEN 1 END) AS test2 FROM employees) t UNPIVOT (count FOR description IN ("TEST1", "TEST2", "TEST3", "TEST4"))
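rough postgres equivalent (a sketch reusing the same test1/test2 aggregates) via a LATERAL VALUES join:
SELECT v.description, v.count FROM (SELECT COUNT(CASE WHEN sal < 10000 THEN 1 END) AS test1, COUNT(CASE WHEN dept > 10 THEN 1 END) AS test2 FROM employees) t CROSS JOIN LATERAL (VALUES ('test1', t.test1), ('test2', t.test2)) AS v(description, count);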
login to default db
psql -d postgres
arrays
select array_length(array[1,2,3], 1);
select array_length(array (select unnest(array[1,2,3]) intersect select unnest(array[2,3,4])), 1);
kill sessions
select pg_terminate_backend(pid) from pg_stat_activity where pid <> pg_backend_pid() AND datname = 'alice_dev';
generate data
select generate_series(1,10) as id, md5(random()::text) as descr; -- "desc" is a reserved word
show size of tables
SELECT nspname || '.' || relname AS "relation",
pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND C.relkind = 'r'
AND nspname !~ '^pg_toast'
ORDER BY pg_total_relation_size(C.oid) DESC
LIMIT 20;
show approx row count in table
SELECT reltuples AS approximate_row_count FROM pg_class WHERE relname = 'table_name';
long running queries
SELECT pid, now() - pg_stat_activity.query_start AS duration, query, state FROM pg_stat_activity WHERE (now() - pg_stat_activity.query_start) > interval '5 minutes';
kill running query
SELECT pg_cancel_backend(pid); -- column was procpid before postgres 9.2
kill idle query
SELECT pg_terminate_backend(pid);
waiting locks
SELECT * FROM pg_locks WHERE NOT GRANTED;
lock activity
SELECT * FROM pg_locks pl LEFT JOIN pg_stat_activity psa ON pl.pid = psa.pid;
coerce timestamp to date
SELECT sent_at::TIMESTAMP::DATE AS sent_day FROM messaging_tasks;
cast in query
SELECT * from things join tickets on tickets.ticketable_id = cast(things.id as varchar);
lateral join
select q1.pprs_id, q2.pprs_id, q1.payday, q2.payday, employee_id, eeb_id, foo, q1.deduction, q1.reimbursement, q2.deduction, q2.reimbursement from (
SELECT pprs.id as pprs_id, employee_benefit_id as eeb_id, pprs.employee_id, pprs.deduction as deduction, pprs.reimbursement as reimbursement, payday from payroll_employee_pay_period_records pprs inner join payroll_pay_periods periods on pprs.pay_period_id = periods.id WHERE periods.id = 10
) q1 LEFT JOIN LATERAL (
select 1 as foo, deduction, reimbursement, pprs.id as pprs_id, payday
FROM payroll_employee_pay_period_records pprs inner join payroll_pay_periods periods ON pprs.pay_period_id = periods.id
WHERE periods.payday < q1.payday AND pprs.employee_id = q1.employee_id AND pprs.employee_benefit_id = q1.eeb_id
AND q1.deduction = pprs.deduction AND q1.reimbursement = pprs.reimbursement
ORDER BY payday desc
LIMIT 1
) q2 ON true;
ruby
gem development
gem build GEMNAME.gemspec
gem install gemname-version.gem
gem unpack ging-opengraph-0.1.0.gem
install specific version
gem install fog -v 1.8
rand N bytes as hex from urandom
f = File.open('/dev/urandom'); f.read(N).unpack('h*')
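or with the stdlib, same idea:
require 'securerandom'; SecureRandom.hex(N) # N random bytes as hex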
minitest
require 'minitest/autorun'
subclass from MiniTest::Test
run with ruby test_file.rb -n my_test_name to run single test
rails
time rails console startup
time echo exit | rails console > /dev/null
time bin/rails runner "puts 1"
log to rails console
ActiveRecord::Base.connection.instance_variable_set :@logger, Logger.new(STDOUT) (rails 2, for sql output)
RAILS_DEFAULT_LOGGER = Logger.new(STDOUT) (rails 2, for ruby output)
ActiveRecord::Base.logger = Logger.new(STDOUT) (rails 3)
asset paths
Rails.application.config.assets.paths
url helpers in console
include Rails.application.routes.url_helpers # rails 3
log queries to rails console
ActiveRecord::Base.logger = Logger.new(STDOUT)
ActiveRecord::Base.connection_pool.clear_reloadable_connections!
reload something under lib in rails console
load "#{Rails.root}/lib/yourfile.rb"
generate shit
rails generate model Product name:string description:text quantity:integer
script/generate scaffold Post title:string body:text --skip-migration
rails generate migration AddIndexToUsers email:string:index
rails generate migration AddUserRefToProducts user:references
with unique
rails generate scaffold AuthUser name:string auth_token:string:uniq:index
migrations
status
rails 3 rake db:migrate:status
rails 2 rake db:version -- just shows you schema version
rake
list rake tasks
rake -T
generate stats in rails project
bundle exec rake stats
db
ActiveRecord::Base.connection
ActiveRecord::Base.connection_config
queries
sum for users
User.sum('total_fans', :conditions => {:id => [1,2,3,4]})
update attrs without mass assignment check
record.assign_attributes(attrs, without_protection: true)
delayed job
run: Delayed::Worker.new.run( Delayed::Job.find(x) )
passenger
passenger-memory-stats
get passenger version
locate passenger-config
/path/to/passenger-config --version
rspec
run single block within a single spec
(bundle exec) rake spec SPEC="/private/var/www/headlinerlite/spec/models/promotion_spec.rb":229
resque
pause workers parent processes
ps aux | grep -i resque | grep -e Waiting -e Forked | grep -v grep | awk '{print $2}' | xargs kill -s USR2
interact via rails console
Resque.info
Resque.peek(:queue_name)
Resque.queues
see jobs
queue = 'mimiupdate' # change this to whatever queue you want to look up
size = Resque.size queue
ids = Resque.peek(queue, 0, size).map do |job|
job['args'].first['id']
end
ids.count
ids.uniq.count
get active workers
Resque.workers.select{|w| w.working? }
on a given host
Resque.workers.select{|w| w.working? && w.id.match(/sd04/) }
unregister stale worker (when pid is not found, but resque still thinks it's running)
worker.unregister_worker
git
branch info
get current commit
git rev-parse HEAD
branch name
git rev-parse --abbrev-ref HEAD
when rebasing:
"us" is the branch you are rebasing off of; "them" is the branch you were on when you started to rebase
"HEAD" is the branch you are rebasing off of
rebase from one step before a commit
git rebase -i <commit>~1
compare files across branches
git diff mybranch master -- myfile.cs
working with upstreams for (e.g.) a fork
git remote add upstream https://github.com/octocat/Spoon-Knife.git
git fetch upstream
git merge upstream/master
dry run merge (ish)
git merge --no-commit --no-ff $BRANCH
log changes to file types
git log -- '*.rb'
ignore changes to certain paths (requires > v1.9.0)
git log -p -Ssomestring -- . ":(exclude)public*hlcg*"
save passphrase
ssh-agent bash
ssh-add
OR
eval "$(ssh-agent -s)"
ssh-add -K ~/.ssh/id_rsa
track upstream branch
git push -u origin branchname
git branch -u origin/branchname
set current branch to track a remote
git branch -u upstream/foo
push to remote branch
git push remotename localbranch:remotebranch
show commit differences between branches
git log master.. (what's in this branch that is not in master)
git log ..feature (what's in feature that is not in this branch)
show deleted files
git log --diff-filter=D --summary
determine if a commit is a merge commit (has two parents)
git cat-file -p COMMIT_ID
fetch PR
git fetch upstream pull/PRID/head:BRANCHNAME
without feature branch:
git fetch REPO_URL refs/pull/PULLID/head
git checkout -b new-branch FETCH_HEAD
clean new, unstaged files (safer than `git clean`)
git status -s | grep '^??' | cut -d' ' -f2 | xargs rm
find commits matching on regex, reverse ordered
git log -p --reverse --grep=statsd
rebase and autosquash
git add . && git commit --fixup SOMESHA1
git rebase -i --autosquash MASTER/DEV
github shows merge conflict but git does not
this is cuz github can't evaluate a recursive 3-way merge. this can occur when there are
multiple merge bases for the two commits you want to merge. check with
git merge-base --all branch/commit1 branch/commit2
search all refs for commits touching a file
git rev-list --all -- employee_missing_fields.json
delete merged
git branch --merged | egrep -v "(^\*|master|develop)" | xargs git branch -d
bash
open files from last ack command in vim
vim -p $(!! -l)
do stuff to files
for i in *.rb; do head -n 5 $i; done
UTF8
export LC_ALL=en_US.UTF-8
export LANG=en_US.UTF-8
export LANGUAGE=en_US.UTF-8
default variable in shell script
MY_VAR=${1:-foo}
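e.g. (sketch):
sh script.sh # MY_VAR is "foo"
sh script.sh bar # MY_VAR is "bar"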
substitute in last command
$ vi /etc/Somefile.conf #(oops!)
$ ^f^F
brace expansion
$ cp filename{,-old}
$ cp filename{-old,}
$ cp filename{-v1,-v2}
word modifiers
h - Remove a trailing file name component, leaving only the head.
t - Remove all leading file name components, leaving the tail.
r - Remove a trailing suffix of the form .xxx, leaving the basename.
e - Remove all but the trailing suffix.
p - print last command that would be executed, instead of executing
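e.g. with history expansion (sketch; the path is made up):
ls /tmp/archive.tar.gz
echo !$:h !$:t !$:r # /tmp archive.tar.gz /tmp/archive.tar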
de-dupe bash history
export HISTCONTROL=ignoreboth:erasedups
scp
to host
scp SourceFile user@host:directory/TargetFile
from host
scp user@host:directory/SourceFile TargetFile
scp -r user@host:directory/SourceFolder TargetFolder
rsync
folder, compressed, verbose, to pwd
rsync -avz --progress user@HOST:/etc/folder .
exclude git, use .gitignore
rsync -av --exclude='/.git' --filter=':- .gitignore' localpath server:/remote/path
vim
show non-printing characters
:set list
:set nolist
show filename
^G
list past registers
:reg
tabs
navigate with gt, gT in normal mode
:tabedit
open all remaining buffers in a new tab, up to `tabpagemax`, after which they will be opened in a new split window
:tab sball
open each buffer in a split view
:sball
set filetype for syntax highlighting
:setf jst
adjust window split size (widen active window by 30 columns)
:30winc >
expand horiz
C-w |
expand vert
C-w _
equal size all window panes
C-w =
restart youcompleteme
:YcmRestartServer
hard wrap text at 80 chars
:set formatoptions+=w | set tw=80, then gggqG
temporarily disable neocomplete (causes errors with python)
call neocomplete#init#disable()
tabs (instead of spaces)
:set tabstop=2 " To match the sample file
:set noexpandtab " Use tabs, not spaces
:%retab! " Retabulate the whole file
disable syntax highlighting
:syntax off
filetype
run detection
:filetype detect
show status
:filetype
show current filetype
:set filetype?
show all variables
:let
:let g: (show global vars)
redirect output of a command
:redir @a
:let g:
:redir END
"ap
quit with error code, no save (handy to abort `git commit --amend`)
:cq
downcase
vmode select then `gu`
upcase
vmode select then `gU` or `shift-u`
rename file
:NERDTreeFind, then m
show current buffer number
:echo bufnr('%')
swap panes
<C-w> <C-r>
netcat
sending
nc -v HOST PORT < filename.tgz
receiving
nc -v -l PORT > filename.tgz
via pipe
cat file - | nc host port
cat <(echo command) - | nc host port
misc
randomness
get 256 bits of randomness
dd if=/dev/urandom bs=32 count=1 of=rand.out
dates
see TZ info
ls /usr/share/zoneinfo/America/
view datetime for zone
TZ=America/Boise date
formatted
DATE=`date +%Y-%m-%d:%H:%M:%S`
determine PID of service given a port
lsof -i :PORT
netstat -an | grep 6379 | grep EST | awk '{print $4}' | awk -F ":" '{print $2}' | perl -i -pe 's/^/:/' | xargs -n1 lsof -i
show PIDs with open TCP sockets (optional port)
lsof -i TCP:8080
lsof -i TCP
list ports and pids
netstat -tulpn # linux
netstat -anv # bsd
get PID by process name
pgrep -f 'PROCESSNAME'
redirect traffic
redir traffic from 80 to 8080, 443 to 8443
echo "
rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080
rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443
" | sudo pfctl -ef -
throttle traffic
bsd
sudo ipfw add pipe 1 ip from any to any out dst-port 10000-80000
sudo ipfw pipe 1 config bw 8KBytes/s
osx (removed ipfw, now must use less ergonomic pf and dnctl)
1. enable pf: `sudo pfctl -E`
2. create pf anchor: `(cat /etc/pf.conf && echo "dummynet-anchor \"mop\"" && echo "anchor \"mop\"") | sudo pfctl -f -`
3. create pipe: `echo "dummynet in quick proto tcp from any to any port 2480 pipe 1" | sudo pfctl -a mop -f -`
4. configure your pipe: `sudo dnctl pipe 1 config bw 1Mbit/s delay 1000`
when done, to flush:
1. sudo dnctl flush
2. sudo pfctl -f /etc/pf.conf
linux (is there an easier way?)
# create iptables mark rules
sudo iptables -A OUTPUT -p tcp --dport 6379 -j MARK --set-mark 115
sudo iptables -A INPUT -p tcp --dport 6379 -j MARK --set-mark 115
# create tc rules that reference mark created by iptables, adding 5s of latency to/from redis
sudo tc qdisc add dev lo handle 1: root prio
sudo tc qdisc add dev lo handle 30: parent 1:3 netem delay 5010ms 0ms
sudo tc filter add dev lo protocol ip parent 1:0 prio 3 handle 115 fw flowid 1:3
# when done
iptables -L --line-numbers
iptables -D INPUT/OUTPUT/ETC NUMBER
sudo iptables --flush
sudo tc qdisc del dev lo root
add group
groupadd developers
add existing user to group
usermod -a -G ftp tony
show users groups
groups USERNAME
remove user from group (debian/ubuntu)
deluser USER GROUP
calendar
cal
ascii
man ascii
compare binary files
vimdiff <(xxd file1) <(xxd file2)
convert an ascii hex dump back to binary
xxd -p -r FILENAME
find stuff
mdfind
can't delete file with no permissions?
lsattr filename
chattr -i filename (remove immutable attribute)
grep for unicode (non-ascii) characters, unix grep (not osx)
grep -r --color='auto' -P -n "[\x80-\xFF]" .
grep for unicode chars on osx (bsd grep)
pcregrep --color='auto' -n "[\x80-\xFF]"
list users
awk -F":" '{ print "username: " $1 "\t\tuid:" $3 }' /etc/passwd
apt-cache
# search
apt-cache search nginx-common
# show packages
apt-cache show nginx-common
apt-cache policy mysql
# show which repos apt-cache is generating package lists from
# these can be from /etc/apt/sources.list and /etc/apt/sources.list.d/
apt-cache policy
delete files from a certain date range
find . -type f -newermt 20160801 \! -newermt 20160830 | xargs rm
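# or, where find supports -delete (GNU find does)
find . -type f -newermt 20160801 \! -newermt 20160830 -delete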
apt-get
# install specific version
apt-get install libmysqlclient18=5.5.44-0ubuntu0.12.04.1
# when you have no add-apt-repository
apt update
apt install software-properties-common
read N bytes of file
head -c N FILENAME
tail -c N FILENAME
reverse a file
tail -r FILENAME # bsd/osx; on linux use tac
high-level monitor IO (disk and network) on linux
dstat
use interpreter path from env
#!/usr/bin/env ruby
monitoring
iostat
watch ifconfig eth0
nettop (osx)
iotop (uses dtrace, broken on recent OSX)
iptables
block outgoing udp traffic to statsd
sudo iptables -A OUTPUT -j DROP -p udp --destination-port 8125
delete rules by line number
iptables -L --line-numbers
iptables -D INPUT/OUTPUT/ETC NUMBER
dpkg
show installed packages
dpkg --get-selections | grep -v deinstall
ffmpeg
basic conversion
ffmpeg -i song.mp3 song.wav
loop an image into an mp4 video with audio.mp3 (-loop_input is old ffmpeg syntax; newer builds use -loop 1 before the image input)
ffmpeg -loop_input -i imagine.jpg -i audio.mp3 -y -r 30 -b 2500k -acodec ac3 -ab 384k -vcodec mpeg4 result.mp4
tar
exclude folders
tar --exclude='./folder' --exclude='./upload/folder2' -zcvf /backup/filename.tgz .
extract to folder
tar -xavf file.tar.gz -C /some/folder
dtrace
list of unique providers on this machine
dtrace -l | perl -pe 's/^.*?\S+\s+(\S+?)([0-9]|\s).*/\1/' | sort -u
list syscalls
dtrace -ln 'syscall:::entry'
all syscalls for PID
sudo dtrace -qn 'syscall::write:entry, syscall::sendto:entry /pid == $target/ { printf("(%d) %s %s", pid, probefunc, arg1 != NULL ? copyinstr(arg1) : "<NULL>"); }' -p $SERVER_PID
dtruss (process syscall details using dtrace)
list open syscalls for PID
sudo dtruss -t open_nocancel -p PID
strace
find which files are being opened by what process
strace php 2>&1 | grep php.ini
strace -e open php 2>&1 | grep php.ini
find failing open/access calls
strace -e read,access COMMAND 2>&1 | grep yourfilename
what is process PID doing?
strace -f -p PID
profile what's using time
strace -c -p 11084
intercept PID's output (may work...)
strace -ewrite -p 24189
sed and ilk
double space a file
sed G
Insert a blank line above every line that matches "regex".
sed '/regex/{x;p;x;}'
number each line of a file
sed = filename | sed 'N;s/\n/\t/'
Substitute all occurrences of "foo" with "bar" on all lines that contain "baz".
sed '/baz/s/foo/bar/g'
Substitute all occurrences of "foo" with "bar" on all lines that DO NOT contain "baz".
sed '/baz/!s/foo/bar/g'
reverse order lines in a file
sed '1!G;h;$!d'
find, remove first log chars, replace array separator with newline, remove whitespace, dedupe, count unique lines
cat orient-server.log.1 | grep "2015-10-07 14:13:30:568 INFO" | cut -c 110- | sed 's/, /\'$'\n/g' | tr -d ' ' | sort | uniq -u | wc -l
count unique IP address
cat clients.txt | cut -d" " -f1 | sed 's/addr=//g' | sed 's/:[[:digit:]]\+//g' | sort | uniq -c
feed open local ports to lsof
netstat -an -t | grep 172.31.21.87 | awk '{print $4}' | cut -d: -f2 | xargs echo | sed 's/ /,/g' | lsof -i
sum dollar amount lines
cat sales.txt | grep \\$ | tr -d '$' | cut -d ' ' -f1 | awk '{ sum +=$1 } END {print sum}'
openssl
generate self-signed certificate
openssl genrsa -des3 -out server.key 1024 # generate RSA private key
openssl req -new -key server.key -out server.csr # generate CSR (certificate signing request)
cp server.key server.key.org && openssl rsa -in server.key.org -out server.key # remove passphrase from key
openssl x509 -req -days 365 -in server.csr -signkey server.key -out server.crt # generate self-signed certificate
next, move to spot where nginx or apache are looking for it, restart, BOOM
check expiry of x509 cert
openssl x509 -enddate -noout -in foocert.pem
verify pem
openssl verify foocert.pem
verify connection
openssl s_client -connect IP.AD.DR.ES:443 -cert PATH_TO_CERT.pem -tls1_2
generate sha1 key for hmac
echo -n 'value' | openssl dgst -sha1 -hmac 'key'
inspect a x509 cert in PEM format
openssl x509 -inform pem -in mycert.cer -text
check for ssl version support
openssl s_client -connect example.com:443 -ssl3
geminabox
deploy
gem build gemname
gem inabox gem-1.1.1.gem
tcpdump
flags
-A print packet contents in ASCII
-s 0 capture all bytes (up to 64k)
-w FILENAME write to file (useful for later analysis with wireshark)
capture traffic to a port, from an IP
tcpdump src 10.0.88.3 and dst port 443
ignore redis loopback iface
tcpdump -i lo not port 6379
capture udp to statsd port on loopback iface
tcpdump -n udp -i lo dst port 8125
dns queries (may be over UDP or TCP)
tcpdump -vvv -s 0 -l -n port 53
sed, awk
get line of file
sed 'Nq;d' file # prints line N, e.g. sed '5q;d' file
sum lines
cat ... | awk '{ sum +=$1} END {print sum}'
join lines with comma
cat failures.txt | cut -f2 | awk '/START/{if (NR!=1)print "";next}{printf "%s,", $0;}END{print "";}'
print every other line
awk 'NR % 2 {print}' file.txt
ntp
when time is out of sync
sudo ntpdate time.nist.gov
(may need to stop ntp server first, with systemd/upstart)
or
sudo ntpd -gq
check ntpd status
ntpq -c lpeer
ntpq # interactive; then type `as` at the prompt to list associations
wget
quiet, to stdout
wget -qO- file.txt
different filename
wget -O myfilename.tar.gz https://fo.com/bar.tar.gz
curl
make JSON GET
curl -H "Accept: application/json" http://localhost:3000/drinks/1
JSON POST
curl -k -X POST -H "Content-Type: application/json" -H "Accept: application/json" -d "{\"somekey\":123}" http://foo.com
ignore SSL error
curl -k
use HTTP headers from file (just cut the GET /path.html part)
curl -H "$(cat headers.txt)" http://host.com/foo
read from cookie file (old netscape tab-delimited format)
curl -b COOKIEJARFILE http://foo.bar
for piping, silence progress
curl -s URL | jq '.'
get just the headers
curl -s -D - https://foo.com/bar -o /dev/null
curl an IP for a specific host
curl -v 143.95.252.125 -H 'Host: www.foo.com'
use a proxy
curl --proxy http://basic:auth@host.com:port ...
gdb
redirect output, etc. with gdb
get PID
look at file descriptors in /proc/PID/fd
gdb -p PID
p close(TARGET_FD) # close file descriptor, with number
p open("/tmp/foo.og", 0600)
q
attach to ruby
gdb /usr/bin/ruby 25215
call rb_backtrace()
# more advanced:
define redirect_stdout
call rb_eval_string("$_old_stdout, $stdout = $stdout, File.open('/tmp/ruby-debug.' + Process.pid.to_s, 'a'); $stdout.sync = true")
end
define ruby_eval
call(rb_p(rb_eval_string_protect($arg0,(int*)0)))
end
break func # set breakpoint at func
info f # info for selected frame
info locals # local variables
info variables # all variables
info args # args for this function
bt # backtrace
select-frame N # select a frame from bt
gdb PROGRAM CORE # to start gdb for program with CORE dump
ulimit -c unlimited # allow core dumps of any size (probably bad idea)
x/3xw $sp # inspect 3 words above sp in hex; sp could also be any address
break *address # set breakpoint at address
disassemble # disassemble current function
disassemble 0x4404fa,0xff04ff # disassemble range
start # skip typing break main; run
C-x a # enter text UI mode
C-l # repaint text UI mode
C-x 2 # multiple windows -- get assembly, registers -- can hit multiple times
C-x 1 # back to source code
C-p, C-n # back/forth in UI mode
python print('hello world') # gdb has built-in python interpreter after version 7, or something
python print(gdb.breakpoints())
command 2 # run list of commands when breakpoint 2 is hit
p $pc # print program counter
reverse debugging (requires process recording to be on)
record # start recording; needed before any reverse- command
reverse-stepi # go back one instruction
watch x # watch changes to memory location x
reverse-continue
lldb
help
help breakpoint set
disassemble function
disas -n function_name
set breakpoint at address
breakpoint set -a 0x100000f25
list breakpoints
br l
orientdb
select user_id, in('Follow').size() from CpUser WHERE user_id in [1,127528]
select user_id, in('Follow').size() from (select expand(in('Follow')) from #11:0)
drop db
disconnect, then: DROP DATABASE remote:localhost/demo user password
docker
connect to running docker
docker exec -it myr /bin/bash
start new docker linked to other
docker run -it --link CONTAINERNAME:ALIAS --rm IMAGE sh -c '/bin/bash'
run docker container daemonized, bind port
docker run --name NAME -p 6379 -d IMAGE
bind container port to host port
docker run --name myr -p 6379:6379 -d redis
restart and attach
docker start `docker ps -q -l` && docker attach `docker ps -q -l`
remove exited containers
docker ps -aq --no-trunc | xargs docker rm
quickstart docker with mount
docker run -it -v ~/Dropbox/luke/code/pwnable/:/pwnable ubuntu sh -c '/bin/bash'
rename your image (if you build image without a name)
docker tag 38cb0d846ab4 lukeasrodgers/docker-dev:latest
clean all exited docker containers
sudo docker ps --filter "status=exited" | awk '{print $1}' | xargs --no-run-if-empty sudo docker rm
docker-compose
remove a volume from a container
docker-compose rm -v CONTAINERALIAS
run and cleanup after run, using ports specified in compose yaml file
docker-compose run --rm --service-ports SERVICENAME
docker-machine
reinstall
check for errors
docker-machine ls
docker-machine rm -f default
docker-machine create -d virtualbox default
docker-machine env default
set env
eval $(docker-machine env default)
try ping
ping $(docker-machine ip default)
try ps -a
docker $(docker-machine config default) ps -a
vagrant
speed up ssh
vagrant ssh-config > vagrant_ssh_config
ssh -F vagrant_ssh_config default
linux
install manpages
apt-get install manpages
apt-get install man-db
python
# install shit
curl -kLso /tmp/get-pip.py https://bootstrap.pypa.io/get-pip.py
sudo python /tmp/get-pip.py
sudo pip install virtualenv
mkdir -p ~/.virtualenvs
virtualenv ~/.virtualenvs/somevirtenv
. ~/.virtualenvs/somevirtenv/bin/activate
pip install whateva
# simple serve current folder
python -m SimpleHTTPServer 8001
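# python 3 equivalent
python3 -m http.server 8001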
brew
switch to specific version
brew switch mysql 5.5.29
see what packages use what lib (e.g. readline)
brew uses --installed readline
relink readline after homebrew screws it up
ln -s /usr/local/opt/readline/lib/libreadline.dylib /usr/local/opt/readline/lib/libreadline.6.dylib
chef
shasum for file
shasum -a 256 /path/to/file
shasum -a 256 /path/to/file | cut -c-12 # just the first 12 chars
aws
ecs
aws ecs list-clusters --profile babydidwhatnow
ec2
# query and filter instances
aws ec2 describe-instances --filters "Name=tag:environment,Values=prod" "Name=tag:service,Values=orientdb" --query "Reservations[*].Instances[*].{IP:PrivateIpAddress,Tags:Tags}"
# get instance IPs
aws ec2 describe-instances --instance-ids="ID" --query="Reservations[*].Instances[*].{IP:PublicIpAddress,PrivIP:PrivateIpAddress}"
elb
# get lb names
aws elb describe-load-balancers --query="LoadBalancerDescriptions[*].{name:LoadBalancerName}"
# instances for a given lb by name
aws elb describe-load-balancers --load-balancer-names="ELBNAME" --query="LoadBalancerDescriptions[*].{Instances:Instances}"
route53
aws route53 list-hosted-zones
# get resource record sets by zone id
aws route53 list-resource-record-sets --hosted-zone-id ZONEID --query "ResourceRecordSets[?Name == 'host.name.com.']"
iam
get iam info for current user, based on keys
aws iam get-user
get iam info for username
aws iam get-user --user-name foo
s3/s3api
get acl for bucket
aws s3api get-bucket-acl --bucket bucketname
add "folder" to bucket
aws s3api put-object --acl private --bucket bucketname --key 'foldername/'
get object from bucket by key, store in FILE
aws s3api get-object --bucket BUCKET --key FOLDER/file.ext FILE.ext
dig
quickly show IPs for a given domain
dig +short a DOMAIN.com
specify a given DNS server
dig a foo.com @ns1.somednsserver.com
specify given DNS server by ip
dig @xxx.xxx.xxx.xxx foo.com
reverse DNS
dig -x IPADDRESS
telnet
make HTTP request
telnet thehost.com 80
GET /index.html HTTP/1.1
Host: thehost.com
# finish with a blank line (hit enter twice) to end the headers
gcc
strip debug, keep symbols
strip --strip-debug EXECUTABLE
perl
print hex from shell
perl -e 'print("\x41"x20 . "\n");'
neo4j
get HA info via HTTP with basic auth
curl http://u:pw@db.domain.com:7474/db/manage/server/ha
collect info on box
lscpu
uname -a
ldconfig -v
sysctl
read a value
sysctl VALUENAME, e.g. `sysctl vm.swappiness`
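write a value (takes effect immediately, not persisted across reboot)
sudo sysctl -w vm.swappiness=10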
system stats
vmstat (RAM/cpu usage, linux); vmstat 5 (show output every 5 s)
vm_stat (osx)
RAM
cat /proc/sys/vm/swappiness (linux; 0-100 tendency to swap, higher swaps more eagerly)
jq
format json into new file
jq '.' in.json > out.json
extract key Id from hash in doubly-nested array
OUTPUT | jq '.[][].Id'
aq -- tool for querying AWS resources with SQL
select e.public_ip_address, e.tags->'Name' from ec2_instances e
select e.public_ip_address, e.tags->'Name' from ec2_instances e WHERE e.tags->'service' = 'worker'
emacs
switch window
C-x o
delete other windows
C-x 1
delete current window
C-x 0
split window horiz
C-x 2
split window vert
C-x 3
list buffers
C-x C-b
new buffer
C-x b
write buffer (save)
C-x C-s
open file
C-x C-f
reindent line
C-u TAB
reload file into buffer from disk (requires confirmation)
M-x revert-buffer
cut/kill
C-w
copy/kill-ring-save
M-w
paste/yank
C-y
make selected window wider
C-x }
Move forward over a balanced expression (forward-sexp).
C-M-f
Move backward over a balanced expression (backward-sexp).
C-M-b
Kill balanced expression forward (kill-sexp).
C-M-k
Quit emacs (save buffers, quit)
C-x C-c
eval last sexp (eval-last-sexp)
C-x C-e
org mode
toggle TODO
C-c C-t
cycle through overview
SHIFT-TAB, ...
schedule an item
C-c C-s
show agenda
C-c a a
add current file to agenda list
C-c [
insert new headline
M-RET
move headline up
M-up/down
promote/demote headline
M-right/left
cycle through TODO state
S-left/right
sort org-mode selection
C-c ^
sort org-mode TODOs
M-x org-sort-entries
enable visual line mode
(visual-line-mode t)
list contrib packages
M-x list-packages
mark package with `i` to install
`x` to finish and install marked packages
CIDER
set namespace in REPL to namespace at top of current file
C-c M-n
run last expression
C-x C-e
compile current buffer
C-c C-k
eval form and display result in echo area
C-c C-e
Evaluate the top level form under point and display the result in the echo area
C-c C-c
elixir/mix
start new mix project with supervisor
mix new --sup PROJNAME
qemu
install an OS via ISO for x64
1. create image for virtual disk: qemu-img create -f qcow2 alpine-test 3G (qcow2 takes less space than raw, but is slower)
2. figure out which machine to use for the architecture you want, e.g. qemu-system-arm -machine help
3. tell QEMU to use the image for its hard disk, with 512 megs of RAM: qemu-system-x86_64 -hda alpine-test -cdrom ~/Downloads/alpine-3.4.4-x86_64.iso -m 512 -boot d
4. the last step may complete then exit (read this online; it didn't for me); if so, run it again without -boot d
install an OS for an arm architecture
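a hedged sketch, untested: the aarch64 "virt" machine needs UEFI firmware (QEMU_EFI.fd from an ARM UEFI build); all filenames here are placeholders
qemu-img create -f qcow2 arm-test 3G
qemu-system-aarch64 -M virt -cpu cortex-a57 -m 1024 -bios QEMU_EFI.fd -drive if=virtio,file=arm-test,format=qcow2 -cdrom installer.iso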
xcode
check if dev tools installed (>= mavericks)
xcode-select -p (should show path to apple dev folder)
install nokogiri with old ruby
gem install nokogiri -v '1.5.10' -- --with-xml2-include=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk/usr/include/libxml2 --use-system-libraries
s3fs
quick unmount and mount (see `man fuse` for more)
fusermount -u s3/
s3fs BUCKET_NAME s3/ -o allow_other -o use_cache=/tmp -o uid=SOME_USER_NUMBER -o gid=SOME_GROUP_NUMBER -o iam_role=IAM_ROLE
osx/macos shitty shit
fix missing camera
sudo killall VDCAssistant && sudo killall AppleCameraAssistant
fix airport wifi stuck
sudo ifconfig en0 down/up
sudo networksetup -setairportpower en0 off/on
imagemagick
get orientation
identify -format '%[EXIF:Orientation]' <image.jpg>
create a blank image
convert -size 200x200 xc:white canvas.png
gnuplot
plot 'file.ext' with {lines,boxes,steps,points,impulses}
plot 'file.ext' using 2:1 with lines # if Y data is first
set pointsize 5 # increase point size
set offset graph 0.1, graph 0.1, graph 0.1, graph 0.1 # add margins
puppet
install module
puppet module install puppetlabs/apache --target-dir /puppet/modules/
inspect puppet resources
puppet resource -h
puppet resource host
puppet resource user root
heroku
view logs
heroku logs -t -r dev # r is shorthand for a heroku remote
add a remote
heroku git:remote -a myappname
nmap
check for ssl versions
nmap --script ssl-enum-ciphers example.com
fingerprint a single service/port
nmap -sV example.com -p 443
sqlite
show tables
.tables
desc
.schema entries
import csv
.mode csv
.import path/to/file.csv table_name_here
timetrap
find sum entries between certain dates
t d --start "April 15 2017" --end "April 30 2017" | grep '^\s*[0-9][^-]*$'
mark times as invoiced
t d -v --start "April 18 2017" --end "April 30 2017" | grep '^[0-9]' | awk '{print $1}' | xargs -I % t e -i % -z 'invoiced'
apache bench ab
ab -t 10 -c 2 http://127.0.0.1:3000/foo # 10 seconds concurrency 2
code climate
docker run --interactive --tty --rm --env CODECLIMATE_CODE="$PWD" --volume "$PWD":/code --volume /var/run/docker.sock:/var/run/docker.sock --volume /tmp/cc:/tmp/cc codeclimate/codeclimate analyze
ssh
generate fingerprint of key
ssh-keygen -lf /etc/ssh/ssh_host_ecdsa_key
sftp
with pem file
make sure pem is chmod 400
sftp -o IdentityFile=filename.pem SOME_HOST
yarn
yarn add pkg-name # this will save and update yarn lockfile
network timeout? default may be too small
yarn add foo --network-timeout 1000000
imap
test connection
telnet HOSTNAME 143
login (will be plaintext; may not work, and you probably shouldn't do it)
01 login nam@domaine.com password
macvim
brew upgrade macvim --with-lua # need lua for neocomplete
dkim with amavis
sudo amavisd-new genrsa /var/db/dkim/brooklynparenttherapy.com.key.pem
sudo chown amavis:amavis brooklynparenttherapy.com.key.pem
# edit amavis file, may be named differently
sudo vim 21-ubuntu_defaults
# enter line like dkim_key('brooklynparenttherapy.com', 'main', '/var/db/dkim/brooklynparenttherapy-com.key.pem');
sudo amavisd-new showkeys
# copy the value you see onto your dns, as a TXT record, with host as main._domainkey
# value will look something like `v=DKIM1; p=MIGfMgQC02y/7cX8Ul2iPhwufzpPrEeNsb+Gdkk4FXVbAQAB` but longer
sudo service amavis restart
sudo amavisd-new testkeys # will take a while for new DNS to be reflected
macos
why did computer shutdown?
log show --style syslog --predicate 'eventMessage contains "shutdown cause"'
setting TZ
setting TZ explicitly can help prevent excessive stat() of /etc/localtime
tzselect # uses a text-based wizard
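or set it directly (any zoneinfo name works):
export TZ=America/New_York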
modoboa bullshit
- is amavis running? sudo service amavis status
- is uwsgi running? service uwsgi restart
- sudo service postfix start
- sudo service uwsgi start
systemd
- list services: sudo service --status-all
r lang
quick histogram
data <- c(1,2,3,4) # create vector
hist(data, main="Char title", xlab="x axis label")
r stuff that seems like it should be auto included
purrr
install.packages("purrr")
library(purrr)
map_dbl(1:10, ~ . + 1)
generate random sample from normal distribution
a <- rnorm(100, 30, 5) # last param is stdev
summary stats
summary(a)
bigquery cli
export schema
bq show --format=json tilda_postgres.cards_authorizations | jq '.schema.fields' > cards_authorizations.json
create a new table, given JSON schema
bq mk --table --description "cards_authorizations" tilda_postgres.cards_authorizations cards_authorizations.json