Skip to content

Instantly share code, notes, and snippets.

@sdgluck
Forked from rgreenjr/postgres_queries_and_commands.sql
Last active June 28, 2024 14:13
Show Gist options
  • Save sdgluck/9f826382d4d5160c50817506bc928200 to your computer and use it in GitHub Desktop.
Save sdgluck/9f826382d4d5160c50817506bc928200 to your computer and use it in GitHub Desktop.
Useful PostgreSQL administration and diagnostic queries
-- show running queries, longest-running first
-- NOTE: the old '<IDLE>' marker in "query" was replaced by the "state"
-- column in Postgres 9.2; filtering on state works on all modern versions.
SELECT pid,
       age(clock_timestamp(), query_start) AS duration,
       usename,
       query
FROM pg_stat_activity
WHERE state <> 'idle'
  AND query NOT ILIKE '%pg_stat_activity%'  -- hide this monitoring query itself
ORDER BY duration DESC;
-- kill running query (cancels the current statement; the backend survives)
-- "procpid" was renamed to "pid" in Postgres 9.2 — substitute the backend
-- pid reported by pg_stat_activity, e.g. SELECT pg_cancel_backend(12345);
SELECT pg_cancel_backend(pid);
-- kill idle query (terminates the whole backend process, dropping the session)
-- "procpid" was renamed to "pid" in Postgres 9.2 — substitute the backend
-- pid reported by pg_stat_activity, e.g. SELECT pg_terminate_backend(12345);
SELECT pg_terminate_backend(pid);
-- vacuum command
-- Reclaims dead-tuple space in every table the current role may vacuum and
-- refreshes planner statistics (ANALYZE); VERBOSE prints per-table progress.
VACUUM (VERBOSE, ANALYZE);
-- all currently active (non-idle) sessions
-- (the original comment said "all database users", which belongs to the
-- pg_user query below; "current_query" was renamed to "query" and the '<%'
-- idle marker was replaced by the "state" column in Postgres 9.2)
select * from pg_stat_activity where state <> 'idle';
-- all database users
-- (the original comment said "all databases and their sizes", which belongs
-- to the pg_database query below)
select * from pg_user;
-- all databases and their sizes, largest first
-- (the original comment said "all tables and their size, with/without
-- indexes", but this query reports per-database totals from pg_database)
select datname,
       pg_size_pretty(pg_database_size(datname)) as size
from pg_database
order by pg_database_size(datname) desc;
-- cache hit rates (should not be less than 0.99)
-- NULLIF guards against division by zero on a freshly-started cluster
-- where no heap blocks have been hit yet.
SELECT sum(heap_blks_read) AS heap_read,
       sum(heap_blks_hit) AS heap_hit,
       (sum(heap_blks_hit) - sum(heap_blks_read))
           / NULLIF(sum(heap_blks_hit), 0) AS ratio
FROM pg_statio_user_tables;
-- table index usage rates (should not be less than 0.99)
-- NULLIF avoids a division-by-zero error for tables that have never been
-- scanned at all (seq_scan + idx_scan = 0); such rows show NULL instead.
SELECT relname,
       100 * idx_scan / NULLIF(seq_scan + idx_scan, 0) AS percent_of_times_index_used,
       n_live_tup AS rows_in_table
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
-- how many indexes are in cache
-- NULLIF guards against division by zero when no index blocks were hit yet.
SELECT sum(idx_blks_read) AS idx_read,
       sum(idx_blks_hit) AS idx_hit,
       (sum(idx_blks_hit) - sum(idx_blks_read))
           / NULLIF(sum(idx_blks_hit), 0) AS ratio
FROM pg_statio_user_indexes;
-- find missing indexes: large, busy tables where sequential scans dominate
-- BUG FIX: in the original one-line version the inline "-- more than 2%"
-- comment swallowed the rest of the WHERE clause, the ORDER BY, and the
-- terminating semicolon. Reformatted so every predicate is actually applied.
SELECT relname AS table_name,
       to_char(seq_scan, '999,999,999,999') AS total_seq_scan,
       to_char(idx_scan, '999,999,999,999') AS total_index_scan,
       to_char(n_live_tup, '999,999,999,999') AS table_rows,
       pg_size_pretty(pg_relation_size(relname::regclass)) AS table_size
FROM pg_stat_all_tables
WHERE schemaname = 'public'
  -- seq scans account for more than 2% of all scans
  AND 50 * seq_scan > idx_scan
  AND n_live_tup > 10000
  AND pg_relation_size(relname::regclass) > 5000000
ORDER BY relname ASC;
-- find invalid indexes (typically left behind by a failed
-- CREATE INDEX CONCURRENTLY); rewritten from an implicit comma join to an
-- explicit INNER JOIN — the result set is identical.
SELECT *
FROM pg_class
INNER JOIN pg_index ON pg_index.indexrelid = pg_class.oid
WHERE pg_index.indisvalid = false;
-- find unused indexes (https://dba.stackexchange.com/a/74892/239303)
-- Pipeline: table_scans gathers per-table scan/write counts; all_writes sums
-- total writes; indexes joins stats with definitions for non-unique indexes
-- (unique indexes enforce constraints, so "unused" is fine for them);
-- index_ratios derives scan percentages and scans-per-write; index_groups
-- buckets candidates into four drop-candidate categories, most clear-cut first.
WITH table_scans as( SELECT relid, tables.idx_scan + tables.seq_scan as all_scans, ( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del) as writes, pg_relation_size(relid) as table_size FROM pg_stat_user_tables as tables ), all_writes as ( SELECT sum(writes) as total_writes FROM table_scans ), indexes as ( SELECT idx_stat.relid, idx_stat.indexrelid, idx_stat.schemaname, idx_stat.relname as tablename, idx_stat.indexrelname as indexname, idx_stat.idx_scan, pg_relation_size(idx_stat.indexrelid) as index_bytes, indexdef ~* 'USING btree' AS idx_is_btree FROM pg_stat_user_indexes as idx_stat JOIN pg_index USING (indexrelid) JOIN pg_indexes as indexes ON idx_stat.schemaname = indexes.schemaname AND idx_stat.relname = indexes.tablename AND idx_stat.indexrelname = indexes.indexname WHERE pg_index.indisunique = FALSE ), index_ratios AS ( SELECT schemaname, tablename, indexname, idx_scan, all_scans, round(( CASE WHEN all_scans = 0 THEN 0.0::NUMERIC ELSE idx_scan::NUMERIC/all_scans * 100 END),2) as index_scan_pct, writes, round((CASE WHEN writes = 0 THEN idx_scan::NUMERIC ELSE idx_scan::NUMERIC/writes END),2) as scans_per_write, pg_size_pretty(index_bytes) as index_size, pg_size_pretty(table_size) as table_size, idx_is_btree, index_bytes FROM indexes JOIN table_scans USING (relid) ), index_groups AS ( SELECT 'Never Used Indexes' as reason, *, 1 as grp FROM index_ratios WHERE idx_scan = 0 and idx_is_btree UNION ALL SELECT 'Low Scans, High Writes' as reason, *, 2 as grp FROM index_ratios WHERE scans_per_write <= 1 and index_scan_pct < 10 and idx_scan > 0 and writes > 100 and idx_is_btree UNION ALL SELECT 'Seldom Used Large Indexes' as reason, *, 3 as grp FROM index_ratios WHERE index_scan_pct < 5 and scans_per_write > 1 and idx_scan > 0 and idx_is_btree and index_bytes > 100000000 UNION ALL SELECT 'High-Write Large Non-Btree' as reason, index_ratios.*, 4 as grp FROM index_ratios, all_writes WHERE ( writes::NUMERIC / ( total_writes + 1 ) ) > 0.02 AND NOT idx_is_btree AND 
index_bytes > 100000000 ORDER BY grp, index_bytes DESC ) SELECT reason, schemaname, tablename, indexname, index_scan_pct, scans_per_write, index_size, table_size FROM index_groups;
-- get progress of indexing (https://dev.to/bolajiwahab/progress-reporting-in-postgresql-1i0d)
-- Joins pg_stat_progress_create_index (p) with pg_stat_activity for the
-- building backend (a) and, via LEFT JOIN, for any blocking locker (l).
-- The CASE maps each CONCURRENTLY build phase to "n of 12"; the format()
-- columns render block/tuple/locker/partition progress as "pct (done of total)",
-- with nullif() preventing division by zero when a total is 0.
-- Requires Postgres 12+ (pg_stat_progress_create_index).
SELECT clock_timestamp() - a.xact_start AS duration_so_far, coalesce(a.wait_event_type ||'.'|| a.wait_event, 'false') AS waiting, a.state, p.phase, CASE p.phase WHEN 'initializing' THEN '1 of 12' WHEN 'waiting for writers before build' THEN '2 of 12' WHEN 'building index: scanning table' THEN '3 of 12' WHEN 'building index: sorting live tuples' THEN '4 of 12' WHEN 'building index: loading tuples in tree' THEN '5 of 12' WHEN 'waiting for writers before validation' THEN '6 of 12' WHEN 'index validation: scanning index' THEN '7 of 12' WHEN 'index validation: sorting tuples' THEN '8 of 12' WHEN 'index validation: scanning table' THEN '9 of 12' WHEN 'waiting for old snapshots' THEN '10 of 12' WHEN 'waiting for readers before marking dead' THEN '11 of 12' WHEN 'waiting for readers before dropping' THEN '12 of 12' END AS phase_progress, format( '%s(%s of %s)', coalesce(round(100.0 * p.blocks_done / nullif(p.blocks_total, 0), 2)::text || '%', 'not applicable'), p.blocks_done::text, p.blocks_total::text) AS scan_progress, format( '%s (%s of %s)', coalesce(round(100.0 * p.tuples_done / nullif(p.tuples_total, 0), 2)::text || '%', 'not applicable'), p.tuples_done::text, p.tuples_total::text ) AS tuples_loading_progress, format( '%s (%s of %s)', coalesce((100 * p.lockers_done / nullif(p.lockers_total, 0))::text || '%', 'not applicable'), p.lockers_done::text, p.lockers_total::text ) AS lockers_progress, format( '%s (%s of %s)', coalesce((100 * p.partitions_done / nullif(p.partitions_total, 0))::text || '%', 'not applicable'), p.partitions_done::text, p.partitions_total::text ) AS partitions_progress, p.current_locker_pid, trim(trailing ';' from l.query) AS current_locker_query FROM pg_stat_progress_create_index AS p JOIN pg_stat_activity AS a ON a.pid = p.pid LEFT JOIN pg_stat_activity AS l ON l.pid = p.current_locker_pid ORDER BY clock_timestamp() - a.xact_start DESC;
-- delete records in batches (keeps transactions short and lock windows small)
-- BUG FIXES vs. the original:
--   * total_rows_deleted was declared without an initial value, so it was
--     NULL and "NULL + n" kept it NULL — the running total never updated.
--   * the "tbl" variable was dead: a static "DELETE FROM tbl" targets a
--     literal table named "tbl". Taking the table name from a variable
--     requires dynamic SQL; format(%I) safely quotes the identifier.
DO $$
DECLARE
    tbl        CONSTANT text := '<tbl>';   -- target table name (must have an "id" column)
    batch_size CONSTANT int  := 10000;     -- rows removed per iteration
    rows_deleted       int;
    total_rows_deleted int := 0;           -- must start at 0, not NULL
BEGIN
    LOOP
        EXECUTE format(
            'WITH del AS (
                 DELETE FROM %I
                 WHERE id IN (SELECT id FROM %I LIMIT $1)
                 RETURNING id)
             SELECT count(*) FROM del',
            tbl, tbl)
        INTO rows_deleted
        USING batch_size;

        total_rows_deleted := total_rows_deleted + rows_deleted;
        RAISE NOTICE 'Deleted: % (Total: %)', rows_deleted, total_rows_deleted;

        EXIT WHEN rows_deleted = 0;  -- table drained
    END LOOP;
END $$;
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment