Useful PostgreSQL Queries and Commands
-- performance tools
-- https://www.vividcortex.com/resources/network-analyzer-for-postgresql
-- show running queries (pre 9.2)
SELECT procpid, age(clock_timestamp(), query_start), usename, current_query
FROM pg_stat_activity
WHERE current_query != '<IDLE>' AND current_query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start desc;
-- show running queries (post 9.2)
SELECT pid, age(clock_timestamp(), query_start), usename, query
FROM pg_stat_activity
WHERE state != 'idle' AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start desc;
-- Show queries taking more than 5 minutes
SELECT
pid,
now() - pg_stat_activity.query_start AS duration,
query,
state
FROM pg_stat_activity
WHERE now() - pg_stat_activity.query_start > interval '5 minutes';
-- lock information (9.3 +)
SELECT a.datname,
c.relname,
l.transactionid,
l.mode,
l.GRANTED,
a.usename,
a.query,
a.query_start,
age(now(), a.query_start) AS "age",
a.pid
FROM pg_stat_activity a
JOIN pg_locks l ON l.pid = a.pid
JOIN pg_class c ON c.oid = l.relation
ORDER BY a.query_start;
-- blocked and blocking queries (matched on transactionid)
SELECT bl.pid AS blocked_pid,
a.usename AS blocked_user,
kl.pid AS blocking_pid,
ka.usename AS blocking_user,
a.query AS blocked_statement
FROM pg_catalog.pg_locks bl
JOIN pg_catalog.pg_stat_activity a ON a.pid = bl.pid
JOIN pg_catalog.pg_locks kl ON kl.transactionid = bl.transactionid AND kl.pid != bl.pid
JOIN pg_catalog.pg_stat_activity ka ON ka.pid = kl.pid
WHERE NOT bl.granted;
-- more lock detection
CREATE VIEW lock_monitor AS(
SELECT
COALESCE(blockingl.relation::regclass::text,blockingl.locktype) as locked_item,
now() - blockeda.query_start AS waiting_duration, blockeda.pid AS blocked_pid,
blockeda.query as blocked_query, blockedl.mode as blocked_mode,
blockinga.pid AS blocking_pid, blockinga.query as blocking_query,
blockingl.mode as blocking_mode
FROM pg_catalog.pg_locks blockedl
JOIN pg_stat_activity blockeda ON blockedl.pid = blockeda.pid
JOIN pg_catalog.pg_locks blockingl ON(
( (blockingl.transactionid=blockedl.transactionid) OR
(blockingl.relation=blockedl.relation AND blockingl.locktype=blockedl.locktype)
) AND blockedl.pid != blockingl.pid)
JOIN pg_stat_activity blockinga ON blockingl.pid = blockinga.pid
AND blockinga.datid = blockeda.datid
WHERE NOT blockedl.granted
AND blockinga.datname = current_database()
);
-- virtual transaction lock id search
SELECT locktype, relation::regclass, mode, transactionid AS tid,
virtualtransaction AS vtid, pid, granted
FROM pg_catalog.pg_locks l LEFT JOIN pg_catalog.pg_database db
ON db.oid = l.database WHERE (db.datname = 'sandbox' OR db.datname IS NULL)
AND NOT pid = pg_backend_pid();
-- Note that each transaction also holds an ExclusiveLock on its own virtualxid, i.e. its virtual transaction ID.
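-- To hide those self-held virtualxid locks from a listing like the one above, a minimal
-- sketch (assumption: you only care about locks that can actually block another session):
SELECT locktype, relation::regclass, mode, pid, granted
FROM pg_catalog.pg_locks
WHERE locktype != 'virtualxid'
AND pid != pg_backend_pid();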
-- missing indexes
SELECT schemaname, relname, seq_scan-idx_scan AS too_much_seq, case when seq_scan-idx_scan>0 THEN 'Missing Index?' ELSE 'OK' END, pg_relation_size(format('%I.%I', schemaname, relname)::regclass) AS rel_size, seq_scan, idx_scan
FROM pg_stat_user_tables
WHERE pg_relation_size(format('%I.%I', schemaname, relname)::regclass)>80000 ORDER BY too_much_seq DESC;
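-- Hedged follow-up sketch: confirm a suspected missing index by running EXPLAIN on the
-- query you believe is causing the sequential scans (table and column names are placeholders)
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM some_table WHERE some_column = 42;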
-- Unused indexes
SELECT
PSUI.indexrelid::regclass AS IndexName
,PSUI.relid::regclass AS TableName
FROM pg_stat_user_indexes AS PSUI
JOIN pg_index AS PI
ON PSUI.IndexRelid = PI.IndexRelid
WHERE PSUI.idx_scan = 0
AND PI.indisunique IS FALSE;
-- duplicate indexes
SELECT
indrelid::regclass AS TableName
,array_agg(indexrelid::regclass) AS Indexes
FROM pg_index
GROUP BY
indrelid
,indkey
HAVING COUNT(*) > 1;
-- The following query may be helpful to see what processes are blocking SQL statements (these only find row-level locks, not object-level locks).
SELECT blocked_locks.pid AS blocked_pid,
blocked_activity.usename AS blocked_user,
blocking_locks.pid AS blocking_pid,
blocking_activity.usename AS blocking_user,
blocked_activity.query AS blocked_statement,
blocking_activity.query AS current_statement_in_blocking_process
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.GRANTED;
-- INSERT/UPDATE/DELETE statistics for tables:
SELECT relname,
cast(n_tup_ins AS numeric) / (n_tup_ins + n_tup_upd + n_tup_del) AS ins_pct,
cast(n_tup_upd AS numeric) / (n_tup_ins + n_tup_upd + n_tup_del) AS upd_pct,
cast(n_tup_del AS numeric) / (n_tup_ins + n_tup_upd + n_tup_del) AS del_pct
FROM pg_stat_user_tables
WHERE (n_tup_ins + n_tup_upd + n_tup_del) > 0
ORDER BY relname;
-- Find commonly accessed tables and their use of indexes:
SELECT relname,seq_tup_read,idx_tup_fetch,cast(idx_tup_fetch AS numeric) / (idx_tup_fetch + seq_tup_read) AS idx_tup_pct
FROM pg_stat_user_tables WHERE (idx_tup_fetch + seq_tup_read)>0
ORDER BY idx_tup_pct;
-- Analysis: for each row, a low "idx_tup_pct" means essentially no indexes are being used.
-- In the case of "facebook_oauths" it turned out we were commonly running a query like "SELECT * FROM facebook_oauths WHERE fb_user_id = X" and there wasn't an index on "fb_user_id"
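-- A hedged fix sketch for that example (assumptions: the column is fb_user_id and the index
-- name below is made up; CONCURRENTLY avoids locking the table during the build):
CREATE INDEX CONCURRENTLY idx_facebook_oauths_fb_user_id ON facebook_oauths (fb_user_id);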
-- Table I/O
SELECT relname,cast(heap_blks_hit as numeric) / (heap_blks_hit + heap_blks_read) AS hit_pct,
heap_blks_hit,heap_blks_read
FROM pg_statio_user_tables WHERE (heap_blks_hit + heap_blks_read)>0 ORDER BY hit_pct;
-- 'heap_blks_hit' = the number of blocks that were satisfied from the page cache
-- 'heap_blks_read' = the number of blocks that had to hit disk/IO layer for reads
-- When 'heap_blks_hit' is significantly greater than 'heap_blks_read', the DB is well cached and most queries can be satisfied from the cache
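-- If hit_pct is low across the board, a hedged first check is how big the page cache is
-- relative to the database (both statements are standard; no other assumptions):
SHOW shared_buffers;
SELECT pg_size_pretty(pg_database_size(current_database()));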
-- Table & Index sizes
SELECT
t.tablename,
indexname,
c.reltuples::integer AS num_rows,
pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size,
pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size,
CASE WHEN x.is_unique = 1 THEN 'Y'
ELSE 'N'
END AS UNIQUE,
idx_scan AS number_of_scans,
idx_tup_read AS tuples_read,
idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename=c.relname
LEFT OUTER JOIN
(SELECT indrelid,
max(CAST(indisunique AS integer)) AS is_unique
FROM pg_index
GROUP BY indrelid) x
ON c.oid = x.indrelid
LEFT OUTER JOIN
( SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns, idx_scan, idx_tup_read, idx_tup_fetch,indexrelname FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class ipg ON ipg.oid = x.indexrelid
JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid )
AS foo
ON t.tablename = foo.ctablename
WHERE t.schemaname='public'
ORDER BY pg_relation_size(quote_ident(indexrelname)::text) desc;
-- Index Health
SELECT indexrelname,cast(idx_tup_read AS numeric) / idx_scan AS avg_tuples,idx_scan,idx_tup_read FROM pg_stat_user_indexes WHERE idx_scan > 0;
-- Index Size
SELECT
schemaname,
relname,
indexrelname,
idx_scan,
pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size
FROM
pg_stat_user_indexes i
JOIN pg_index USING (indexrelid)
WHERE
indisunique IS false
ORDER BY idx_scan,relname;
-- Index I/O - Same idea as Table I/O above
SELECT indexrelname,cast(idx_blks_hit as numeric) / (idx_blks_hit + idx_blks_read) AS hit_pct,
idx_blks_hit,idx_blks_read FROM pg_statio_user_indexes WHERE
(idx_blks_hit + idx_blks_read)>0 ORDER BY hit_pct;
-- Show sizes & usage of indexes that are not used very often:
-- NOTE: we define 'usage' by # of times used, in this case we use '200' - change accordingly
SELECT idstat.relname AS table_name, indexrelname AS index_name, idstat.idx_scan AS times_used,
pg_size_pretty(pg_relation_size(tabstat.relid)) AS table_size, pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
n_tup_upd + n_tup_ins + n_tup_del as num_writes, indexdef AS definition
FROM pg_stat_user_indexes AS idstat JOIN pg_indexes ON indexrelname = indexname
JOIN pg_stat_user_tables AS tabstat ON idstat.relname = tabstat.relname
WHERE idstat.idx_scan < 200 AND indexdef !~* 'unique'
ORDER BY idstat.relname, indexrelname;
-- cancel a running query (graceful: only the query is cancelled, the connection stays open)
SELECT pg_cancel_backend(pid);       -- the column is procpid on pre-9.2
-- terminate a backend (forceful: drops the whole connection, not just the query; use when cancel is not enough)
SELECT pg_terminate_backend(pid);    -- the column is procpid on pre-9.2
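-- A hedged batch sketch: cancel everything that has been active for over 10 minutes
-- (assumptions: 9.2+ column names, and 10 minutes is an arbitrary cut-off - adjust to taste)
SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE state = 'active'
AND now() - query_start > interval '10 minutes'
AND pid != pg_backend_pid();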
-- vacuum command
VACUUM (VERBOSE, ANALYZE);
-- all active sessions and their users (pre-9.2 column names)
select * from pg_stat_activity where current_query not like '<%';
-- all databases and their sizes
select datname, pg_size_pretty(pg_database_size(datname))
from pg_database
order by pg_database_size(datname) desc;
-- cache hit rates (should not be less than 0.99)
SELECT sum(heap_blks_read) as heap_read, sum(heap_blks_hit) as heap_hit, (sum(heap_blks_hit) - sum(heap_blks_read)) / sum(heap_blks_hit) as ratio
FROM pg_statio_user_tables;
-- table index usage rates (percent_of_times_index_used should not drop much below 99)
SELECT relname, 100 * idx_scan / (seq_scan + idx_scan) percent_of_times_index_used, n_live_tup rows_in_table
FROM pg_stat_user_tables
WHERE seq_scan + idx_scan > 0
ORDER BY n_live_tup DESC;
-- index cache hit rate (same idea as above, for indexes)
SELECT sum(idx_blks_read) as idx_read, sum(idx_blks_hit) as idx_hit, (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit) as ratio
FROM pg_statio_user_indexes;
-- Dump database on remote host to file
$ pg_dump -U username -h hostname databasename > dump.sql
-- Import dump into existing database
$ psql -d newdb -f dump.sql
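-- Variant sketch (assumption: the custom dump format is acceptable; it can then be restored
-- with parallel jobs, here 4; file name is illustrative)
$ pg_dump -U username -h hostname -Fc databasename > dump.custom
$ pg_restore -d newdb -j 4 dump.custom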
-- Multiple instances, find the configuration file of the connected instance
SELECT * FROM pg_settings WHERE category LIKE 'File Locat%';
-- Find the connected slaves
SELECT * FROM pg_stat_replication;
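-- Hedged lag sketch, run on the primary (assumption: PostgreSQL 10+ naming; older versions
-- use pg_current_xlog_location()/pg_xlog_location_diff()/replay_location instead)
SELECT application_name, client_addr, state,
pg_wal_lsn_diff(pg_current_wal_lsn(), replay_lsn) AS replay_lag_bytes
FROM pg_stat_replication;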
-- On Linux - Find the open pg_log files for this month (needs updatedb/locate package)
$ fuser $(locate pg_log | grep $(date +'%Y-%m'))
-- Firewall rule to log incoming connections to the PostgreSQL port
-- enable
$ iptables -I INPUT -p tcp -m tcp --dport 5432 -m state --state NEW -j LOG --log-level 1 --log-prefix "New Connection"
-- delete
$ iptables -D INPUT -p tcp -m tcp --dport 5432 -m state --state NEW -j LOG --log-level 1 --log-prefix "New Connection"
-- Schema stuff
-- equivalent of mysql 'SHOW CREATE': dump the schema only and grep for the indexes
$ pg_dump -st planet_osm_polygon grb_temp | grep -i INDEX | grep -v '\-\-'
-- Finding indexes belonging to a table using metadata
SELECT
t.relname AS table_name,
i.relname AS index_name,
a.attname AS column_name
FROM
pg_class t,
pg_class i,
pg_index ix,
pg_attribute a
WHERE
t.oid = ix.indrelid
AND i.oid = ix.indexrelid
AND a.attrelid = t.oid
AND a.attnum = ANY(ix.indkey)
AND t.relkind = 'r'
AND t.relname LIKE '<tablename>';
-- src : https://stackoverflow.com/questions/2204058/list-columns-with-indexes-in-postgresql
-- Change owner globally
-- This is the global equivalent of the ALTER DATABASE command below: instead of updating
-- a particular DB, it changes ownership of all objects (including databases) owned by 'old_name'.
REASSIGN OWNED BY old_name TO new_name;
-- change the owner of a single database
ALTER DATABASE database_name OWNER TO new_owner;
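-- A hedged companion sketch for handing over every table in one schema (assumptions:
-- schema 'public' and role new_owner; run as a superuser or as the current owner):
DO $$
DECLARE r record;
BEGIN
FOR r IN SELECT tablename FROM pg_tables WHERE schemaname = 'public' LOOP
EXECUTE format('ALTER TABLE public.%I OWNER TO new_owner', r.tablename);
END LOOP;
END $$;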
-- Sniffing pgsql queries
$ tshark -i lo -p -f "(tcp[13] != 0x10) and dst port 5432" -t ad -w /tmp/output