Skip to content

Instantly share code, notes, and snippets.

Show Gist options
  • Save tiagomota/be93b9ff7fb4904700e37acf5c68c6f5 to your computer and use it in GitHub Desktop.
Save tiagomota/be93b9ff7fb4904700e37acf5c68c6f5 to your computer and use it in GitHub Desktop.
Useful PostgreSQL Queries and Commands
-- enter in postgres user
sudo su postgres
psql
-- clear the OS/PostgreSQL caches to force fresh disk reads on subsequent queries (Linux):
-- 1. Shut down the database server (pg_ctl stop, sudo service postgresql stop, etc.)
-- 2. echo 3 > /proc/sys/vm/drop_caches  -- as root; drops the OS page/dentry/inode caches (Linux-specific)
-- 3. Start the database server again
-- *****************************
-- MONITORING
-- *****************************
-- show currently running queries (active, or stuck holding an open transaction),
-- oldest query first
SELECT
    pid,
    application_name,
    backend_start,
    query_start,
    state,
    query
FROM pg_stat_activity
WHERE state = 'active'
   OR state = 'idle in transaction'
ORDER BY query_start ASC;
-- cancel a running query (template: substitute the target backend's pid,
-- taken from pg_stat_activity). NOTE: the column was renamed
-- procpid -> pid in PostgreSQL 9.2; the old name no longer exists.
SELECT pg_cancel_backend(pid);
-- forcibly terminate a backend (e.g. one stuck 'idle in transaction');
-- unlike pg_cancel_backend this kills the whole session, not just the query
SELECT pg_terminate_backend(pid);
-- show mean execution time and call count of this role's frequent queries
-- (requires the pg_stat_statements extension), slowest on average first
SELECT
    s.query,
    s.mean_time,
    s.calls
FROM pg_stat_statements AS s
INNER JOIN pg_authid AS a
    ON a.oid = s.userid
WHERE a.rolname = 'movercado'
  AND s.calls > 100
ORDER BY s.mean_time DESC;
-- show size and usage of the db indexes (public schema), biggest index first.
-- Tables without indexes appear with NULL index columns (LEFT JOIN).
SELECT
    t.tablename,
    indexname,
    c.reltuples AS num_rows,
    pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size,
    pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size,
    CASE
        WHEN indisunique THEN 'Y'
        ELSE 'N'
    END AS UNIQUE,
    idx_scan AS number_of_scans,
    idx_tup_read AS tuples_read,
    idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT JOIN pg_class c
    ON t.tablename = c.relname
LEFT JOIN (
    -- one row per index in the public schema, with its usage counters
    SELECT
        c.relname AS ctablename,
        ipg.relname AS indexname,
        x.indnatts AS number_of_columns,
        idx_scan,
        idx_tup_read,
        idx_tup_fetch,
        indexrelname,
        indisunique
    FROM pg_index x
    INNER JOIN pg_class c ON c.oid = x.indrelid
    INNER JOIN pg_class ipg ON ipg.oid = x.indexrelid
    INNER JOIN pg_stat_all_indexes psai
        ON x.indexrelid = psai.indexrelid
       AND psai.schemaname = 'public'
) AS foo
    ON t.tablename = foo.ctablename
WHERE t.schemaname = 'public'
-- order by physical index size, then scan count
-- (scan count was previously the positional reference "7 DESC")
ORDER BY pg_relation_size(quote_ident(indexrelname)::text) DESC NULLS LAST,
         idx_scan DESC;
-- show duplicated indexes (same set of columns, same opclass, expression and
-- predicate). Usually dropping one is ok.
SELECT
    pg_size_pretty(SUM(pg_relation_size(idx))::BIGINT) AS SIZE,
    (array_agg(idx))[1] AS idx1,
    (array_agg(idx))[2] AS idx2,
    (array_agg(idx))[3] AS idx3,
    (array_agg(idx))[4] AS idx4
FROM (
    -- fingerprint each index by everything that defines it:
    -- parent table, operator classes, key columns, expressions and predicate
    SELECT
        indexrelid::regclass AS idx,
        (
            indrelid::text || E'\n' || indclass::text || E'\n' || indkey::text || E'\n'
            || COALESCE(indexprs::text, '') || E'\n' || COALESCE(indpred::text, '')
        ) AS fingerprint
    FROM pg_index
) candidates
GROUP BY fingerprint
HAVING COUNT(*) > 1
ORDER BY SUM(pg_relation_size(idx)) DESC;
-- *****************************
-- OTHERS
-- *****************************
-- vacuum command: reclaim dead-tuple space and refresh planner statistics
-- for every table this role can vacuum (VERBOSE prints a per-table report)
VACUUM (VERBOSE, ANALYZE);
-- sessions currently running a query
-- (current_query was renamed to query in PostgreSQL 9.2; the '<%' filter
--  excluded the old '<IDLE>' placeholder, now reported via the state column)
select * from pg_stat_activity where query not like '<%';
-- all database users
select * from pg_user;
-- all databases and their sizes, biggest first
select datname, pg_size_pretty(pg_database_size(datname))
from pg_database
order by pg_database_size(datname) desc;
-- cache hit rates (ratio should not be less than 0.99)
-- NULLIF guards the division: on a fresh or idle cluster sum(heap_blks_hit)
-- can be 0, which would otherwise raise division_by_zero (ratio is NULL then)
SELECT
    sum(heap_blks_read) AS heap_read,
    sum(heap_blks_hit) AS heap_hit,
    (sum(heap_blks_hit) - sum(heap_blks_read)) / NULLIF(sum(heap_blks_hit), 0) AS ratio
FROM pg_statio_user_tables;
-- table index usage rates (percent_of_times_index_used should not drop below 99)
-- NULLIF guards the division for tables that have never been scanned at all
-- (seq_scan + idx_scan = 0 would otherwise raise division_by_zero)
SELECT
    relname,
    100 * idx_scan / NULLIF(seq_scan + idx_scan, 0) AS percent_of_times_index_used,
    n_live_tup AS rows_in_table
FROM pg_stat_user_tables
ORDER BY n_live_tup DESC;
-- how many index reads are served from cache (ratio should not be less than 0.99)
-- NULLIF guards the division: sum(idx_blks_hit) is 0 on a fresh or idle
-- cluster, which would otherwise raise division_by_zero (ratio is NULL then)
SELECT
    sum(idx_blks_read) AS idx_read,
    sum(idx_blks_hit) AS idx_hit,
    (sum(idx_blks_hit) - sum(idx_blks_read)) / NULLIF(sum(idx_blks_hit), 0) AS ratio
FROM pg_statio_user_indexes;
-- Dump database on remote host to file
$ pg_dump -U username -h hostname databasename > dump.sql
-- Import dump into existing database
$ psql -d newdb -f dump.sql
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment