-- show running queries (PostgreSQL 9.2+)
SELECT pid, age(clock_timestamp(), query_start), usename, query
FROM pg_stat_activity
WHERE state <> 'idle' AND query NOT ILIKE '%pg_stat_activity%'
ORDER BY query_start DESC;
-- cancel a running query (replace pid with the value from pg_stat_activity)
SELECT pg_cancel_backend(pid);
-- terminate a backend, e.g. an idle connection (replace pid)
SELECT pg_terminate_backend(pid);
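-- terminate every idle connection to one database (a sketch; assumes PostgreSQL 9.2+
-- and that 'your_database_name' is a placeholder you replace)
SELECT pg_terminate_backend(pid)
FROM pg_stat_activity
WHERE datname = 'your_database_name'
  AND state = 'idle'
  AND pid <> pg_backend_pid();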
-- get current connections count
SELECT sum(numbackends) FROM pg_stat_database;
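-- connections grouped by database and state (a sketch built on pg_stat_activity,
-- useful for spotting which database is eating the connection slots)
SELECT datname, state, count(*)
FROM pg_stat_activity
GROUP BY datname, state
ORDER BY count(*) DESC;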
-- vacuum and analyze the entire database
VACUUM (VERBOSE, ANALYZE);
-- vacuum a single table
VACUUM (VERBOSE, ANALYZE) your_table_name;
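-- tables with the most dead tuples, i.e. likely vacuum candidates
-- (a sketch over pg_stat_user_tables; what counts as "too many" is workload dependent)
SELECT relname, n_live_tup, n_dead_tup, last_vacuum, last_autovacuum
FROM pg_stat_user_tables
ORDER BY n_dead_tup DESC
LIMIT 10;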
-- check if the server is in recovery (read-only standby) mode
SELECT pg_is_in_recovery();
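-- approximate replication lag on a standby (a sketch; pg_last_xact_replay_timestamp()
-- returns NULL on a primary)
SELECT now() - pg_last_xact_replay_timestamp() AS replication_lag;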
-- queries that have been running for more than 20 seconds
SELECT now() - query_start as "runtime", usename, datname, state, query
FROM pg_stat_activity
WHERE state <> 'idle' AND now() - query_start > '20 seconds'::interval
ORDER BY runtime DESC;
-- all database users
select * from pg_stat_activity where current_query not like '<%';
-- all databases and their sizes
select * from pg_user;
-- all tables and their size, with/without indexes
select datname, pg_size_pretty(pg_database_size(datname))
from pg_database
order by pg_database_size(datname) desc;
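-- (a sketch over pg_stat_user_tables; pg_total_relation_size counts indexes and TOAST,
--  pg_relation_size counts the table heap alone)
SELECT relname,
       pg_size_pretty(pg_total_relation_size(relid)) AS total_size,
       pg_size_pretty(pg_relation_size(relid)) AS table_size_without_indexes
FROM pg_stat_user_tables
ORDER BY pg_total_relation_size(relid) DESC;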
-- cache hit rates (should not be less than 0.99)
SELECT sum(heap_blks_read) as heap_read, sum(heap_blks_hit) as heap_hit, (sum(heap_blks_hit) - sum(heap_blks_read)) / sum(heap_blks_hit) as ratio
FROM pg_statio_user_tables;
-- table index usage rates (should not be less than 99%)
SELECT relname, 100 * idx_scan / (seq_scan + idx_scan) percent_of_times_index_used, n_live_tup rows_in_table
FROM pg_stat_user_tables
WHERE seq_scan + idx_scan > 0
ORDER BY n_live_tup DESC;
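-- rarely used indexes and the space they take (a sketch over pg_stat_user_indexes;
-- idx_scan counters reset together with the rest of the statistics)
SELECT schemaname, relname, indexrelname, idx_scan,
       pg_size_pretty(pg_relation_size(indexrelid)) AS index_size
FROM pg_stat_user_indexes
ORDER BY idx_scan ASC, pg_relation_size(indexrelid) DESC;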
-- index cache hit rate (should not be less than 0.99)
SELECT sum(idx_blks_read) as idx_read, sum(idx_blks_hit) as idx_hit,
       sum(idx_blks_hit) / nullif(sum(idx_blks_hit) + sum(idx_blks_read), 0) as ratio
FROM pg_statio_user_indexes;
-- Get queries running for more than 2 minutes
SELECT now() - query_start as "runtime", usename, datname, state, query
FROM pg_stat_activity
WHERE state <> 'idle' AND now() - query_start > '2 minutes'::interval
ORDER BY runtime DESC;
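-- which sessions are blocked and by whom (a sketch; pg_blocking_pids() requires 9.6+)
SELECT pid, pg_blocking_pids(pid) AS blocked_by, state, query
FROM pg_stat_activity
WHERE cardinality(pg_blocking_pids(pid)) > 0;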
-- Check size of all databases
SELECT d.datname AS Name, pg_catalog.pg_get_userbyid(d.datdba) AS Owner,
CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT')
THEN pg_catalog.pg_size_pretty(pg_catalog.pg_database_size(d.datname))
ELSE 'No Access'
END AS SIZE
FROM pg_catalog.pg_database d
ORDER BY
CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT')
THEN pg_catalog.pg_database_size(d.datname)
ELSE NULL
END;
-- Check size of all tables
SELECT nspname || '.' || relname AS "relation",
pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
FROM pg_class C
LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND C.relkind <> 'i'
AND nspname !~ '^pg_toast'
ORDER BY pg_total_relation_size(C.oid) DESC;
-- List all tables in the public schema and their owners
SELECT t.table_name, t.table_type, c.relname, c.relowner, u.usename
FROM information_schema.tables t
JOIN pg_catalog.pg_class c ON (t.table_name = c.relname)
JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid AND n.nspname = t.table_schema)
JOIN pg_catalog.pg_user u ON (c.relowner = u.usesysid)
WHERE t.table_schema = 'public';
-- Show a function's source code by name
SELECT proname,prosrc FROM pg_proc WHERE proname= 'your_function_name';
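-- full CREATE FUNCTION statement (a sketch using pg_get_functiondef; note that proname
-- alone is ambiguous when the function is overloaded)
SELECT pg_get_functiondef(oid) FROM pg_proc WHERE proname = 'your_function_name';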
-- Dump query output to CSV
\copy (YOUR_SQL_QUERY) to ~/Downloads/my.csv with csv;
-- Dump database on remote host to file
$ pg_dump -U username -h hostname databasename > dump.sql
-- Import dump into existing database
$ psql -d newdb -f dump.sql
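-- Dump in custom format and restore with pg_restore (a sketch; -Fc selects the custom
-- format and -j sets parallel restore jobs, adjust to your hardware)
$ pg_dump -U username -h hostname -Fc databasename > dump.custom
$ pg_restore -U username -d newdb -j 4 dump.custom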