Useful PostgreSQL Queries and Commands
-- https://www.geekytidbits.com/performance-tuning-postgres/
-- http://www.craigkerstiens.com/2012/10/01/understanding-postgres-performance/
-- http://okigiveup.net/what-postgresql-tells-you-about-its-performance/
-- https://wiki.postgresql.org/wiki/Introduction_to_VACUUM,_ANALYZE,_EXPLAIN,_and_COUNT
-- https://devcenter.heroku.com/articles/postgresql-indexes#b-trees-and-sorting
-- http://www.databasesoup.com/2014/05/new-finding-unused-indexes-query.html
-- performance tools
-- https://www.vividcortex.com/resources/network-analyzer-for-postgresql
-- show running queries (pre 9.2)
SELECT procpid, age(clock_timestamp(), query_start), usename, current_query
FROM pg_stat_activity
WHERE current_query != '<IDLE>' AND current_query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start desc;
-- show running queries (>= 9.2, < 9.6)
SELECT pid,
age(clock_timestamp(), query_start) AS age,
usename, state, query, waiting,
age(clock_timestamp(), xact_start) AS xact_age
FROM pg_stat_activity
WHERE state != 'idle'
AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY age DESC;
-- show running queries (>= 9.6)
SELECT pid,
age(clock_timestamp(), query_start) AS age,
usename, state, query, wait_event_type, wait_event,
age(clock_timestamp(), xact_start) AS xact_age
FROM pg_stat_activity
WHERE state != 'idle'
AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY age DESC;
-- Total number of connections
SELECT count(*) FROM pg_stat_activity;
-- Number of connections by state
SELECT state, count(*) FROM pg_stat_activity GROUP BY state;
-- Connections waiting for a lock
SELECT count(distinct pid) FROM pg_locks WHERE granted = false;
-- Maximum transaction age
SELECT max(now() - xact_start) FROM pg_stat_activity
WHERE state IN ('idle in transaction', 'active');
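-- quick sketch: connections in use vs. the configured max_connections limit (uses only standard catalogs)
SELECT count(*) AS current_connections,
       current_setting('max_connections')::int AS max_connections,
       round(100.0 * count(*) / current_setting('max_connections')::int, 1) AS pct_used
FROM pg_stat_activity;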
-- cancel a running query (graceful: the query stops, the backend stays connected)
SELECT pg_cancel_backend(<pid>);
-- terminate a backend (forceful: drops the connection; it does not restart the database, but use with care)
SELECT pg_terminate_backend(<pid>);
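-- a sketch for bulk clean-up: cancel every query that has been active for more than 5 minutes
-- (the threshold is illustrative; adjust it to your workload)
SELECT pg_cancel_backend(pid)
FROM pg_stat_activity
WHERE state = 'active'
  AND now() - query_start > interval '5 minutes'
  AND pid <> pg_backend_pid();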
-- kill any active sessions
SELECT pg_terminate_backend(pg_stat_activity.pid)
FROM pg_stat_activity
WHERE pg_stat_activity.datname = 'TARGET_DB'
AND pid <> pg_backend_pid();
-- show locks
SELECT t.relname, l.locktype, page, virtualtransaction, pid, mode, granted
FROM pg_locks l, pg_stat_all_tables t
WHERE l.relation=t.relid
ORDER BY relation ASC;
-- blocked and blocking queries (matched on a shared transactionid)
SELECT bl.pid AS blocked_pid,
a.usename AS blocked_user,
kl.pid AS blocking_pid,
ka.usename AS blocking_user,
a.query AS blocked_statement
FROM pg_catalog.pg_locks bl
JOIN pg_catalog.pg_stat_activity a ON a.pid = bl.pid
JOIN pg_catalog.pg_locks kl ON kl.transactionid = bl.transactionid AND kl.pid != bl.pid
JOIN pg_catalog.pg_stat_activity ka ON ka.pid = kl.pid
WHERE NOT bl.granted;
-- more lock detection
CREATE VIEW lock_monitor AS(
SELECT
COALESCE(blockingl.relation::regclass::text,blockingl.locktype) as locked_item,
now() - blockeda.query_start AS waiting_duration, blockeda.pid AS blocked_pid,
blockeda.query as blocked_query, blockedl.mode as blocked_mode,
blockinga.pid AS blocking_pid, blockinga.query as blocking_query,
blockingl.mode as blocking_mode
FROM pg_catalog.pg_locks blockedl
JOIN pg_stat_activity blockeda ON blockedl.pid = blockeda.pid
JOIN pg_catalog.pg_locks blockingl ON(
( (blockingl.transactionid=blockedl.transactionid) OR
(blockingl.relation=blockedl.relation AND blockingl.locktype=blockedl.locktype)
) AND blockedl.pid != blockingl.pid)
JOIN pg_stat_activity blockinga ON blockingl.pid = blockinga.pid
AND blockinga.datid = blockeda.datid
WHERE NOT blockedl.granted
AND blockinga.datname = current_database()
);
-- virtual transaction lock id search
SELECT locktype, relation::regclass, mode, transactionid AS tid,
virtualtransaction AS vtid, pid, granted
FROM pg_catalog.pg_locks l LEFT JOIN pg_catalog.pg_database db
ON db.oid = l.database WHERE (db.datname = 'sandbox' OR db.datname IS NULL)
AND NOT pid = pg_backend_pid();
-- lock information (9.3 +)
SELECT a.datname,
c.relname,
l.transactionid,
l.mode,
l.GRANTED,
a.usename,
a.query,
a.query_start,
age(now(), a.query_start) AS "age",
a.pid
FROM pg_stat_activity a
JOIN pg_locks l ON l.pid = a.pid
JOIN pg_class c ON c.oid = l.relation
ORDER BY a.query_start;
-- stats and blocking queries
SELECT blocked_locks.pid AS blocked_pid,
blocked_activity.usename AS blocked_user,
blocking_locks.pid AS blocking_pid,
blocking_activity.usename AS blocking_user,
blocked_activity.query AS blocked_statement,
blocking_activity.query AS blocking_statement
FROM pg_catalog.pg_locks blocked_locks
JOIN pg_catalog.pg_stat_activity blocked_activity ON blocked_activity.pid = blocked_locks.pid
JOIN pg_catalog.pg_locks blocking_locks
ON blocking_locks.locktype = blocked_locks.locktype
AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
AND blocking_locks.pid != blocked_locks.pid
JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
WHERE NOT blocked_locks.granted;
-- show blocking pids in a nice tree (9.6+, uses pg_blocking_pids); the query column is the blocked query
select pid, usename,
age(query_start, clock_timestamp()),
pg_blocking_pids(pid) as blocked_by,
query
from pg_stat_activity
where cardinality(pg_blocking_pids(pid)) > 0
order by age(query_start, clock_timestamp());
-- add wait_event and wait_event_type to see why it is slow
select pid, usename,
query_start,
wait_event, wait_event_type,
pg_blocking_pids(pid) as blocked_by
from pg_stat_activity
where cardinality(pg_blocking_pids(pid)) > 0
order by age(query_start, clock_timestamp());
-- non-idle with nice durations
SELECT pid, age(query_start, clock_timestamp()), usename, query
FROM pg_stat_activity
WHERE state <> 'idle' AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start desc;
-- vacuum command
-- run vacuum (with analyze) on all tables the current user can vacuum in the current database
VACUUM (VERBOSE, ANALYZE);
-- run vacuum for a single table
VACUUM (VERBOSE, ANALYZE) <TABLENAME>;
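-- if autovacuum keeps falling behind on a large table, per-table settings can be tuned;
-- a sketch (table name and values are illustrative, not recommendations):
ALTER TABLE my_large_table SET (autovacuum_vacuum_scale_factor = 0.05, autovacuum_vacuum_threshold = 1000);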
-- list vacuum stats for a database
\c <databaseName>
SELECT psut.relname,
to_char(psut.last_vacuum, 'YYYY-MM-DD HH24:MI') as last_vacuum,
to_char(psut.last_autovacuum, 'YYYY-MM-DD HH24:MI') as last_autovacuum,
to_char(pg_class.reltuples, '9G999G999G999') AS n_tup,
to_char(psut.n_dead_tup, '9G999G999G999') AS dead_tup,
to_char(CAST(current_setting('autovacuum_vacuum_threshold') AS bigint)
+ (CAST(current_setting('autovacuum_vacuum_scale_factor') AS numeric)
* pg_class.reltuples), '9G999G999G999') AS av_threshold,
CASE
WHEN CAST(current_setting('autovacuum_vacuum_threshold') AS bigint)
+ (CAST(current_setting('autovacuum_vacuum_scale_factor') AS numeric)
* pg_class.reltuples) < psut.n_dead_tup
THEN '*'
ELSE ''
END AS expect_av
FROM pg_stat_user_tables psut
JOIN pg_class on psut.relid = pg_class.oid
ORDER BY 1;
-- currently active sessions (pre-9.2: uses the current_query column)
select * from pg_stat_activity where current_query not like '<%';
-- all database users
select * from pg_user;
-- size of a table's indexes and TOAST data (total relation size minus the heap)
select pg_size_pretty(pg_total_relation_size('TABLE NAME') - pg_relation_size('TABLE NAME'));
-- total table size (heap + indexes + TOAST)
select pg_size_pretty(pg_total_relation_size('TABLE NAME'));
-- table size without indexes and TOAST (heap only)
select pg_size_pretty(pg_relation_size('TABLE NAME'));
-- pg disk usage
-- https://wiki.postgresql.org/wiki/Disk_Usage
SELECT *, pg_size_pretty(total_bytes) AS total
, pg_size_pretty(index_bytes) AS INDEX
, pg_size_pretty(toast_bytes) AS toast
, pg_size_pretty(table_bytes) AS TABLE
FROM (
SELECT *, total_bytes-index_bytes-COALESCE(toast_bytes,0) AS table_bytes FROM (
SELECT c.oid,nspname AS table_schema, relname AS TABLE_NAME
, c.reltuples AS row_estimate
, pg_total_relation_size(c.oid) AS total_bytes
, pg_indexes_size(c.oid) AS index_bytes
, pg_total_relation_size(reltoastrelid) AS toast_bytes
FROM pg_class c
LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE relkind = 'r'
) a
) a;
-- Find commonly accessed tables and their use of indexes:
SELECT relname,seq_tup_read,idx_tup_fetch,cast(idx_tup_fetch AS numeric) / (idx_tup_fetch + seq_tup_read) AS idx_tup_pct
FROM pg_stat_user_tables WHERE (idx_tup_fetch + seq_tup_read)>0
ORDER BY idx_tup_pct;
-- Analysis: a low "idx_tup_pct" for a row means that table's queries essentially do not use its indexes.
-- In the case of "facebook_oauths" it turned out we were commonly running "SELECT * FROM facebook_oauths WHERE fb_user_id = X" and there was no index on "fb_user_id".
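-- a fix sketch for the example above: add the missing index without blocking writes
-- (index name is illustrative; CREATE INDEX CONCURRENTLY cannot run inside a transaction block)
CREATE INDEX CONCURRENTLY idx_facebook_oauths_fb_user_id ON facebook_oauths (fb_user_id);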
-- Table I/O
SELECT relname,cast(heap_blks_hit as numeric) / (heap_blks_hit + heap_blks_read) AS hit_pct,
heap_blks_hit,heap_blks_read
FROM pg_statio_user_tables WHERE (heap_blks_hit + heap_blks_read)>0 ORDER BY hit_pct;
-- 'heap_blks_hit' = the number of blocks that were satisfied from the page cache
-- 'heap_blks_read' = the number of blocks that had to hit disk/IO layer for reads
-- When 'heap_blks_hit' is significantly greater than 'heap_blks_read', the database is well cached and most queries are served from memory
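-- if the cache hit percentage is low, check how much memory the buffer cache gets; a sketch
-- (the 2GB value is purely illustrative; changing shared_buffers requires a restart, and ALTER SYSTEM needs 9.4+)
SHOW shared_buffers;
SHOW effective_cache_size;
ALTER SYSTEM SET shared_buffers = '2GB';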
-- Table & Index sizes
SELECT
t.tablename,
indexname,
c.reltuples::integer AS num_rows,
pg_size_pretty(pg_relation_size(quote_ident(t.tablename)::text)) AS table_size,
pg_size_pretty(pg_relation_size(quote_ident(indexrelname)::text)) AS index_size,
CASE WHEN x.is_unique = 1 THEN 'Y'
ELSE 'N'
END AS UNIQUE,
idx_scan AS number_of_scans,
idx_tup_read AS tuples_read,
idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename=c.relname
LEFT OUTER JOIN
(SELECT indrelid,
max(CAST(indisunique AS integer)) AS is_unique
FROM pg_index
GROUP BY indrelid) x
ON c.oid = x.indrelid
LEFT OUTER JOIN
( SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns, idx_scan, idx_tup_read, idx_tup_fetch,indexrelname FROM pg_index x
JOIN pg_class c ON c.oid = x.indrelid
JOIN pg_class ipg ON ipg.oid = x.indexrelid
JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid )
AS foo
ON t.tablename = foo.ctablename
WHERE t.schemaname='public'
ORDER BY pg_relation_size(quote_ident(indexrelname)::text) desc;
-- Index Health
SELECT indexrelname,cast(idx_tup_read AS numeric) / idx_scan AS avg_tuples,idx_scan,idx_tup_read FROM pg_stat_user_indexes WHERE idx_scan > 0;
-- Index Size
SELECT
schemaname,
relname,
indexrelname,
idx_scan,
pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size
FROM
pg_stat_user_indexes i
JOIN pg_index USING (indexrelid)
WHERE
indisunique IS false
ORDER BY idx_scan,relname;
-- Index I/O - Same idea as Table I/O above
SELECT indexrelname,cast(idx_blks_hit as numeric) / (idx_blks_hit + idx_blks_read) AS hit_pct,
idx_blks_hit,idx_blks_read FROM pg_statio_user_indexes WHERE
(idx_blks_hit + idx_blks_read)>0 ORDER BY hit_pct;
-- Show sizes & usage of indexes that are not used very often:
-- NOTE: usage = number of times the index was scanned
SELECT idstat.relname AS table_name, indexrelname AS index_name, idstat.idx_scan AS times_used,
pg_size_pretty(pg_relation_size(tabstat.relid)) AS table_size, pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
n_tup_upd + n_tup_ins + n_tup_del as num_writes, indexdef AS definition
FROM pg_stat_user_indexes AS idstat JOIN pg_indexes ON indexrelname = indexname
JOIN pg_stat_user_tables AS tabstat ON idstat.relname = tabstat.relname
WHERE idstat.idx_scan < 500 AND indexdef !~* 'unique'
ORDER BY idstat.relname, indexrelname;
-- finding unused indexes
-- http://www.databasesoup.com/2014/05/new-finding-unused-indexes-query.html
WITH table_scans as (
SELECT relid,
tables.idx_scan + tables.seq_scan as all_scans,
( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,
pg_relation_size(relid) as table_size
FROM pg_stat_user_tables as tables
),
all_writes as (
SELECT sum(writes) as total_writes
FROM table_scans
),
indexes as (
SELECT idx_stat.relid, idx_stat.indexrelid,
idx_stat.schemaname, idx_stat.relname as tablename,
idx_stat.indexrelname as indexname,
idx_stat.idx_scan,
pg_relation_size(idx_stat.indexrelid) as index_bytes,
indexdef ~* 'USING btree' AS idx_is_btree
FROM pg_stat_user_indexes as idx_stat
JOIN pg_index
USING (indexrelid)
JOIN pg_indexes as indexes
ON idx_stat.schemaname = indexes.schemaname
AND idx_stat.relname = indexes.tablename
AND idx_stat.indexrelname = indexes.indexname
WHERE pg_index.indisunique = FALSE
),
index_ratios AS (
SELECT schemaname, tablename, indexname,
idx_scan, all_scans,
round(( CASE WHEN all_scans = 0 THEN 0.0::NUMERIC
ELSE idx_scan::NUMERIC/all_scans * 100 END),2) as index_scan_pct,
writes,
round((CASE WHEN writes = 0 THEN idx_scan::NUMERIC ELSE idx_scan::NUMERIC/writes END),2)
as scans_per_write,
pg_size_pretty(index_bytes) as index_size,
pg_size_pretty(table_size) as table_size,
idx_is_btree, index_bytes
FROM indexes
JOIN table_scans
USING (relid)
),
index_groups AS (
SELECT 'Never Used Indexes' as reason, *, 1 as grp
FROM index_ratios
WHERE
idx_scan = 0
and idx_is_btree
UNION ALL
SELECT 'Low Scans, High Writes' as reason, *, 2 as grp
FROM index_ratios
WHERE
scans_per_write <= 1
and index_scan_pct < 10
and idx_scan > 0
and writes > 100
and idx_is_btree
UNION ALL
SELECT 'Seldom Used Large Indexes' as reason, *, 3 as grp
FROM index_ratios
WHERE
index_scan_pct < 5
and scans_per_write > 1
and idx_scan > 0
and idx_is_btree
and index_bytes > 100000000
UNION ALL
SELECT 'High-Write Large Non-Btree' as reason, index_ratios.*, 4 as grp
FROM index_ratios, all_writes
WHERE
( case when total_writes = 0 THEN 0 ELSE (writes::NUMERIC / total_writes) END ) > 0.02
AND NOT idx_is_btree
AND index_bytes > 100000000
ORDER BY grp, index_bytes DESC )
SELECT reason, schemaname, tablename, indexname,
index_scan_pct, scans_per_write, index_size, table_size
FROM index_groups;
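-- once an index is confirmed unused (and it does not back a constraint), it can be dropped without blocking writes;
-- a sketch with an illustrative index name:
DROP INDEX CONCURRENTLY IF EXISTS public.idx_unused_example;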
-- the ratio of index scans to all scans per table - ratio should be very close to 1
SELECT relname,idx_scan::float/(idx_scan+seq_scan+1) as idx_scan_ratio
FROM pg_stat_all_tables WHERE schemaname='public' ORDER BY idx_scan_ratio ASC;
-- the ratio of index scans to all scans for the whole database - ratio should be very close to 1
SELECT sum(idx_scan)/(sum(idx_scan) + sum(seq_scan)) as idx_scan_ratio
FROM pg_stat_all_tables WHERE schemaname='public';
-- Number of cache hits vs. disk reads for all tables
with
all_tables as
(
SELECT *
FROM (
SELECT 'all'::text as table_name,
sum( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk,
sum( (coalesce(heap_blks_hit,0) + coalesce(idx_blks_hit,0) + coalesce(toast_blks_hit,0) + coalesce(tidx_blks_hit,0)) ) as from_cache
FROM pg_statio_all_tables --> change to pg_statio_USER_tables if you want to check only user tables (excluding postgres's own tables)
) a
WHERE (from_disk + from_cache) > 0 -- discard tables without hits
),
tables as
(
SELECT *
FROM (
SELECT relname as table_name,
( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk,
( (coalesce(heap_blks_hit,0) + coalesce(idx_blks_hit,0) + coalesce(toast_blks_hit,0) + coalesce(tidx_blks_hit,0)) ) as from_cache
FROM pg_statio_all_tables --> change to pg_statio_USER_tables if you want to check only user tables (excluding postgres's own tables)
) a
WHERE (from_disk + from_cache) > 0 -- discard tables without hits
)
SELECT table_name as "table name",
from_disk as "disk hits",
round((from_disk::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% disk hits",
round((from_cache::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% cache hits",
(from_disk + from_cache) as "total hits"
FROM (SELECT * FROM all_tables UNION ALL SELECT * FROM tables) a
ORDER BY (case when table_name = 'all' then 0 else 1 end), from_disk desc;
-- % times index is used
SELECT relname, 100 * idx_scan / (seq_scan + idx_scan) percent_of_times_index_used, n_live_tup rows_in_table
FROM pg_stat_user_tables WHERE seq_scan + idx_scan > 0 ORDER BY n_live_tup DESC;
-- Cache usage (ratio should not be lower than 0.99)
SELECT sum(heap_blks_read) as heap_read, sum(heap_blks_hit) as heap_hit,
(sum(heap_blks_hit) - sum(heap_blks_read)) / sum(heap_blks_hit) as ratio
FROM pg_statio_user_tables;
-- all databases and their sizes
SELECT datname, pg_size_pretty(pg_database_size(datname))
FROM pg_database
ORDER BY pg_database_size(datname) DESC;
-- largest relations (tables and indexes) by size
SELECT nspname || '.' || relname AS "relation",
pg_size_pretty(pg_relation_size(C.oid)) AS "size"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
ORDER BY pg_relation_size(C.oid) DESC
LIMIT 20;
-- find unused indexes
SELECT
schemaname || '.' || relname AS table,
indexrelname AS index,
pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size,
idx_scan as index_scans
FROM pg_stat_user_indexes ui
JOIN pg_index i ON ui.indexrelid = i.indexrelid
WHERE NOT indisunique AND idx_scan < 50 AND pg_relation_size(relid) > 5 * 8192
ORDER BY pg_relation_size(i.indexrelid) / nullif(idx_scan, 0) DESC NULLS FIRST,
pg_relation_size(i.indexrelid) DESC;
-- detect invalid indexes
SELECT n.nspname, c.relname
FROM pg_catalog.pg_class c, pg_catalog.pg_namespace n,
pg_catalog.pg_index i
WHERE (i.indisvalid = false OR i.indisready = false) AND
i.indexrelid = c.oid AND c.relnamespace = n.oid AND
n.nspname != 'pg_catalog' AND
n.nspname != 'information_schema' AND
n.nspname != 'pg_toast';
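-- an invalid index (e.g. left behind by a failed CREATE INDEX CONCURRENTLY) can be rebuilt;
-- one option is REINDEX ... CONCURRENTLY (PostgreSQL 12+); dropping and recreating it also works (names are illustrative)
REINDEX INDEX CONCURRENTLY schema_name.index_name;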
-- find missing indexes
SELECT relname, seq_scan-idx_scan AS too_much_seq, case when seq_scan-idx_scan>0 THEN 'Missing Index?' ELSE 'OK' END, pg_relation_size(relid::regclass) AS rel_size, seq_scan, idx_scan
FROM pg_stat_all_tables
WHERE schemaname='public' AND
pg_relation_size(relid::regclass)>80000
ORDER BY too_much_seq DESC;
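-- before adding an index for a table flagged above, it helps to confirm the filter column with EXPLAIN;
-- a sketch with illustrative table/column names:
EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM mytable WHERE mycolumn = 42;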
-- all foreign keys
SELECT
tc.constraint_name, tc.table_name, kcu.column_name,
ccu.table_name AS foreign_table_name,
ccu.column_name AS foreign_column_name
FROM
information_schema.table_constraints AS tc
JOIN information_schema.key_column_usage AS kcu
ON tc.constraint_name = kcu.constraint_name
JOIN information_schema.constraint_column_usage AS ccu
ON ccu.constraint_name = tc.constraint_name
WHERE constraint_type = 'FOREIGN KEY';
-- Check the number of inserts, updates, and deletes in DB
SELECT
(SELECT sum(tup_inserted) FROM pg_stat_database WHERE datname = q.db_name) AS "Inserts",
(SELECT sum(tup_updated) FROM pg_stat_database WHERE datname = q.db_name) AS "Updates",
(SELECT sum(tup_deleted) FROM pg_stat_database WHERE datname = q.db_name) AS "Deletes"
FROM (SELECT '<db name>' as db_name) q;
-- cache hit rates (should not be less than 0.99)
-- http://www.craigkerstiens.com/2012/10/01/understanding-postgres-performance/
SELECT sum(heap_blks_read) as heap_read, sum(heap_blks_hit) as heap_hit, (sum(heap_blks_hit) - sum(heap_blks_read)) / sum(heap_blks_hit) as ratio
FROM pg_statio_user_tables;
-- table index usage rates (percent should generally be 99+ for large, frequently read tables)
SELECT relname, 100 * idx_scan / (seq_scan + idx_scan) percent_of_times_index_used, n_live_tup rows_in_table
FROM pg_stat_user_tables
WHERE seq_scan + idx_scan > 0
ORDER BY n_live_tup DESC;
-- index block cache hit ratio
SELECT sum(idx_blks_read) as idx_read, sum(idx_blks_hit) as idx_hit, (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit) as ratio
FROM pg_statio_user_indexes;
--- Requires Extensions ---
--- https://www.postgresql.org/docs/10/pgstatstatements.html
--- Enable pg_stat_statements: add the settings below to postgresql.conf (changing shared_preload_libraries requires a restart)
--- shared_preload_libraries = pg_stat_statements
--- track_activity_query_size = 2048
--- pg_stat_statements.track = ALL
--- pg_stat_statements.max = 10000
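--- After the restart the extension still has to be created in each database where it will be queried;
--- the counters can also be reset at any time:
CREATE EXTENSION IF NOT EXISTS pg_stat_statements;
SELECT pg_stat_statements_reset();
--- Note: on PostgreSQL 13+ the total_time column used below is split into total_exec_time and total_plan_time.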
--- List queries by total_time & see which query spends most time in the database:
SELECT round(total_time*1000)/1000 AS total_time,query
FROM pg_stat_statements
ORDER BY total_time DESC;
--- List queries with number of calls, total time, rows returned and cache hit percentage:
SELECT query, calls, total_time, rows, 100.0 * shared_blks_hit / nullif(shared_blks_hit + shared_blks_read, 0) AS hit_percent
FROM pg_stat_statements
ORDER BY total_time DESC
LIMIT 5;
--- List queries on 'per execution' basis & try to sample them over time:
SELECT queryid, query, calls, total_time/calls, rows/calls, temp_blks_read/calls, temp_blks_written/calls
FROM pg_stat_statements
WHERE calls != 0
ORDER BY total_time DESC
LIMIT 10;
-- finding slow queries (requires the pg_stat_statements extension)
SELECT queryid, regexp_replace(query, E'[\\n\\r]+', ' ', 'g' ), calls, total_time, total_time/calls as mean_time, rows
FROM pg_stat_statements
ORDER BY total_time DESC;
-- See what access someone has to all tables
SELECT table_catalog, table_schema, table_name, privilege_type
FROM information_schema.table_privileges
WHERE grantee = 'username'
ORDER BY table_name;
-- Find connected standby servers (replicas)
SELECT * FROM pg_stat_replication;
-- Change owner
-- reassign all objects owned by 'old_name' to 'new_name' (run it in each database that contains such objects)
REASSIGN OWNED BY old_name TO new_name;
-- change the owner of a single database
ALTER DATABASE database_name OWNER TO new_owner;
-- drop database while people are connected : https://dba.stackexchange.com/questions/11893/force-drop-db-while-others-may-be-connected
-- connect as a superuser, and do not connect to the database you are about to drop
ALTER DATABASE mydb CONNECTION LIMIT 1;
SELECT pg_terminate_backend(pid) FROM pg_stat_activity WHERE datname = 'mydb';
DROP DATABASE mydb;
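-- on PostgreSQL 13+ the terminate-then-drop dance can be replaced with a single statement:
DROP DATABASE mydb WITH (FORCE);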
-- drop all tables
DROP SCHEMA public CASCADE;
CREATE SCHEMA public;
-- see default privileges
\ddp
-- set default privileges so that users get access to new tables
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO username;
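-- note that default privileges only apply to tables created afterwards; existing tables need an explicit grant:
GRANT SELECT ON ALL TABLES IN SCHEMA public TO username;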
-- Put a time limit on a user's queries (value is in milliseconds)
ALTER ROLE guest SET statement_timeout=10000;
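-- the same limit can be tried out in the current session before persisting it on the role:
SET statement_timeout = '10s';
RESET statement_timeout;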
-- Dump table on remote host to file
$ pg_dump -h HOSTNAME -p 5432 -U USERNAME -d DATABASE -F c --table TABLE > TABLE.csql
-- Restore dump into existing database
$ pg_restore -h HOSTNAME -p 5432 -U USERNAME -d DATABASE -Fc --no-owner < TABLE.csql
-- Dump database on remote host to file
$ pg_dump -U username -h hostname databasename > dump.sql
-- Import dump into existing database
$ psql -d newdb -f dump.sql
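-- for larger databases, the directory format allows dumping and restoring in parallel;
-- a sketch (the -j 4 job count is illustrative):
$ pg_dump -U username -h hostname -Fd -j 4 -f dump_dir databasename
$ pg_restore -U username -h hostname -d newdb -j 4 dump_dir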
-- Multiple instances, find the configuration file of the connected instance
SELECT * FROM pg_settings WHERE category LIKE 'File Locat%';
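-- simpler alternatives that print the active configuration file paths directly:
SHOW config_file;
SHOW hba_file;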
-- show db settings
SHOW max_connections;
SHOW ALL;
-- Firewall rule to log incoming connections to the PostgreSQL port
-- enable
$ iptables -I INPUT -p tcp -m tcp --dport 5432 -m state --state NEW -j LOG --log-level 1 --log-prefix "New Connection"
-- delete
$ iptables -D INPUT -p tcp -m tcp --dport 5432 -m state --state NEW -j LOG --log-level 1 --log-prefix "New Connection"
-- On Linux - Find the open pg_log files for this month (needs updatedb/locate package)
$ fuser $(locate pg_log | grep $(date +'%Y-%m'))
-- Sniffing pgsql queries
$ tshark -i lo -p -f "(tcp[13] != 0x10) and dst port 5432" -t ad -w /tmp/output
-- change user password
ALTER USER user_name WITH PASSWORD 'new_password';