-- PostgreSQL related queries
-- Upsert against a partial unique index, with RETURNING
-- The ON CONFLICT target must list the indexed columns AND repeat the index
-- predicate (hence the 'WHERE deleted = false' in the example below).
CREATE UNIQUE INDEX idx_manufacturer_active
ON public.manufacturer
USING btree
(manufacturer COLLATE pg_catalog."default", organisation_id)
WHERE deleted = false;
INSERT INTO manufacturer (manufacturer, deleted, organisation_id)
VALUES ($1, false, $2)
ON CONFLICT (manufacturer, organisation_id) WHERE deleted = false
DO UPDATE SET deleted = false
RETURNING id;
-- Bulk insert for test data includes random data gen
insert into iot (device_make, device_model, msisdn, imsi, iccid, device_lock_state, geo_lock_state, qos_state, nshield_state, qos_network_tech, qos_signal_strength_dbm)
select
'cradlepoint' device_make,
(ARRAY['IBR900','IBR600B','IBR600C', 'IBR200','IBR1700'])[floor(random()*4)+1] device_model,
2798763000000 + i msisdn,
602022155000000 + i imsi,
8920022021379000000 + i iccid,
trunc(random() * 3 + 1) device_lock_state,
trunc(random() * 3 + 1) geo_lock_state,
1 qos_state,
1 nshield_state,
(ARRAY['UMTS','LTE','GSM'])[trunc(random()*3)+1] qos_network_tech,
trunc(random() * -70 + 1) qos_signal_strength_dbm
FROM generate_Series(1,1000) as i;
-- Update a jsonb column with generated values
UPDATE iot SET qos_incidence_summary = (
  '{"p1":' || id ||
  ',"p2":' || right((id + 1)::varchar, 1)::int ||
  ',"p3":' || right((id + 2)::varchar, 1)::int ||
  ',"p7":' || right((id + 5 + trunc(random() * 99))::varchar, 2)::int ||
  '}')::jsonb;
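-- A tidier equivalent sketch using jsonb_build_object (9.5+), which avoids
-- hand-assembling JSON from strings; same keys and values as above:
UPDATE iot SET qos_incidence_summary = jsonb_build_object(
  'p1', id,
  'p2', right((id + 1)::varchar, 1)::int,
  'p3', right((id + 2)::varchar, 1)::int,
  'p7', right((id + 5 + trunc(random() * 99))::varchar, 2)::int
);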
-- PG config
-- https://github.com/sebastianwebber/pgconfig
-- http://pgtune.leopard.in.ua/
-- Long-running queries (older than 1 minute)
-- Note: the 'waiting' column exists only up to 9.5; a 9.6+ variant follows below.
SELECT pid, now() - query_start AS age, usename, datname, waiting, state, query
FROM pg_stat_activity
WHERE now() - query_start > interval '1 minute'
ORDER BY state, age DESC;
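-- The same check on 9.6+, where wait_event_type/wait_event replaced 'waiting':
SELECT pid, now() - query_start AS age, usename, datname, wait_event_type, wait_event, state, query
FROM pg_stat_activity
WHERE now() - query_start > interval '1 minute'
ORDER BY state, age DESC;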
-- Running non-idle queries (tested on 9.6)
SELECT pid, now() - query_start AS age, usename, datname, state, query
FROM pg_stat_activity
WHERE state != 'idle'
ORDER BY state, age DESC
LIMIT 20;
-- Cancel a query
SELECT pg_cancel_backend(PID);
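-- If cancelling the query is not enough, pg_terminate_backend kills the whole
-- backend process and drops its connection, so treat it as the heavier hammer:
SELECT pg_terminate_backend(PID);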
-- Database connections
SELECT state, count(*) FROM pg_stat_activity GROUP BY state;
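-- Quick sanity check of the total against the configured limit:
SELECT count(*) AS connections, current_setting('max_connections') AS max_connections
FROM pg_stat_activity;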
-- psql commands
-- display history or save it to a file
\s [FILE]
-- describe table, view, sequence, or index
\d+ NAME
-- show a function's definition
\sf[+] FUNCNAME
-- list databases with details
\l[+] [PATTERN]
-- use expanded display automatically when output is wide
\x auto
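-- two more that pair well with the above: time every statement
\timing
-- re-run the current query buffer every N seconds (9.3+)
\watch 5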
-- Copy query results to CSV (server-side: the path is resolved on the database server)
COPY (SELECT ...)
TO '/absolute/path/export.csv'
WITH (FORMAT csv, HEADER true);
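-- Client-side twin: psql's \copy writes the file on the machine running psql,
-- so it needs no filesystem access on the server (same options as COPY):
\copy (SELECT ...) TO 'export.csv' WITH (FORMAT csv, HEADER true)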
-- Largest relations (pg_relation_size counts the main fork only, not indexes or TOAST)
SELECT relname AS "relation", pg_size_pretty(pg_relation_size(C.oid)) AS "size"
FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
ORDER BY pg_relation_size(C.oid) DESC
LIMIT 20;
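-- Variant counting indexes and TOAST too, via pg_total_relation_size:
SELECT relname AS "relation", pg_size_pretty(pg_total_relation_size(C.oid)) AS "total_size"
FROM pg_class C LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
WHERE nspname NOT IN ('pg_catalog', 'information_schema')
AND C.relkind = 'r'
ORDER BY pg_total_relation_size(C.oid) DESC
LIMIT 20;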
-- Candidates for indexes
-- source: https://github.com/pgexperts/pgx_scripts
WITH
index_usage AS (
SELECT sut.relid,
current_database() AS database,
sut.schemaname::text as schema_name,
sut.relname::text AS table_name,
sut.seq_scan as table_scans,
sut.idx_scan as index_scans,
pg_total_relation_size(relid) as table_bytes,
round((sut.n_tup_ins + sut.n_tup_del + sut.n_tup_upd + sut.n_tup_hot_upd) /
(seq_tup_read::NUMERIC + 2), 2) as writes_per_scan
FROM pg_stat_user_tables sut
),
index_counts AS (
SELECT sut.relid,
count(*) as index_count
FROM pg_stat_user_tables sut LEFT OUTER JOIN pg_indexes
ON sut.schemaname = pg_indexes.schemaname AND
sut.relname = pg_indexes.tablename
GROUP BY relid
),
too_many_tablescans AS (
SELECT 'many table scans'::TEXT as reason,
database, schema_name, table_name,
table_scans, pg_size_pretty(table_bytes) as table_size,
writes_per_scan, index_count, table_bytes
FROM index_usage JOIN index_counts USING ( relid )
WHERE table_scans > 1000
AND table_scans > ( index_scans * 2 )
AND table_bytes > 32000000
AND writes_per_scan < ( 1.0 )
ORDER BY table_scans DESC
),
scans_no_index AS (
SELECT 'scans, few indexes'::TEXT as reason,
database, schema_name, table_name,
table_scans, pg_size_pretty(table_bytes) as table_size,
writes_per_scan, index_count, table_bytes
FROM index_usage JOIN index_counts USING ( relid )
WHERE table_scans > 100
AND table_scans > ( index_scans )
AND index_count < 2
AND table_bytes > 32000000
AND writes_per_scan < ( 1.0 )
ORDER BY table_scans DESC
),
big_tables_with_scans AS (
SELECT 'big table scans'::TEXT as reason,
database, schema_name, table_name,
table_scans, pg_size_pretty(table_bytes) as table_size,
writes_per_scan, index_count, table_bytes
FROM index_usage JOIN index_counts USING ( relid )
WHERE table_scans > 100
AND table_scans > ( index_scans / 10 )
AND table_bytes > 1000000000
AND writes_per_scan < ( 1.0 )
ORDER BY table_bytes DESC
),
scans_no_writes AS (
SELECT 'scans, no writes'::TEXT as reason,
database, schema_name, table_name,
table_scans, pg_size_pretty(table_bytes) as table_size,
writes_per_scan, index_count, table_bytes
FROM index_usage JOIN index_counts USING ( relid )
WHERE table_scans > 100
AND table_scans > ( index_scans / 4 )
AND table_bytes > 32000000
AND writes_per_scan < ( 0.1 )
ORDER BY writes_per_scan ASC
)
SELECT reason, database, schema_name, table_name, table_scans,
table_size, writes_per_scan, index_count
FROM too_many_tablescans
UNION ALL
SELECT reason, database, schema_name, table_name, table_scans,
table_size, writes_per_scan, index_count
FROM scans_no_index
UNION ALL
SELECT reason, database, schema_name, table_name, table_scans,
table_size, writes_per_scan, index_count
FROM big_tables_with_scans
UNION ALL
SELECT reason, database, schema_name, table_name, table_scans,
table_size, writes_per_scan, index_count
FROM scans_no_writes;
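-- Once a candidate table/column is picked from the output above, the index can
-- be built without blocking writes (table and column names here are illustrative):
CREATE INDEX CONCURRENTLY idx_table_column ON table_name (column_name);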
-- Unused indexes
-- source: https://gist.github.com/jberkus/6b1bcaf7724dfc2a54f3
WITH table_scans as (
SELECT relid,
tables.idx_scan + tables.seq_scan as all_scans,
( tables.n_tup_ins + tables.n_tup_upd + tables.n_tup_del ) as writes,
pg_relation_size(relid) as table_size
FROM pg_stat_user_tables as tables
),
all_writes as (
SELECT sum(writes) as total_writes
FROM table_scans
),
indexes as (
SELECT idx_stat.relid, idx_stat.indexrelid,
idx_stat.schemaname, idx_stat.relname as tablename,
idx_stat.indexrelname as indexname,
idx_stat.idx_scan,
pg_relation_size(idx_stat.indexrelid) as index_bytes,
indexdef ~* 'USING btree' AS idx_is_btree
FROM pg_stat_user_indexes as idx_stat
JOIN pg_index
USING (indexrelid)
JOIN pg_indexes as indexes
ON idx_stat.schemaname = indexes.schemaname
AND idx_stat.relname = indexes.tablename
AND idx_stat.indexrelname = indexes.indexname
WHERE pg_index.indisunique = FALSE
),
index_ratios AS (
SELECT schemaname, tablename, indexname,
idx_scan, all_scans,
round(( CASE WHEN all_scans = 0 THEN 0.0::NUMERIC
ELSE idx_scan::NUMERIC/all_scans * 100 END),2) as index_scan_pct,
writes,
round((CASE WHEN writes = 0 THEN idx_scan::NUMERIC ELSE idx_scan::NUMERIC/writes END),2)
as scans_per_write,
pg_size_pretty(index_bytes) as index_size,
pg_size_pretty(table_size) as table_size,
idx_is_btree, index_bytes
FROM indexes
JOIN table_scans
USING (relid)
),
index_groups AS (
SELECT 'Never Used Indexes' as reason, *, 1 as grp
FROM index_ratios
WHERE
idx_scan = 0
and idx_is_btree
UNION ALL
SELECT 'Low Scans, High Writes' as reason, *, 2 as grp
FROM index_ratios
WHERE
scans_per_write <= 1
and index_scan_pct < 10
and idx_scan > 0
and writes > 100
and idx_is_btree
UNION ALL
SELECT 'Seldom Used Large Indexes' as reason, *, 3 as grp
FROM index_ratios
WHERE
index_scan_pct < 5
and scans_per_write > 1
and idx_scan > 0
and idx_is_btree
and index_bytes > 100000000
UNION ALL
SELECT 'High-Write Large Non-Btree' as reason, index_ratios.*, 4 as grp
FROM index_ratios, all_writes
WHERE
( writes::NUMERIC / ( total_writes + 1 ) ) > 0.02
AND NOT idx_is_btree
AND index_bytes > 100000000
ORDER BY grp, index_bytes DESC )
SELECT reason, schemaname, tablename, indexname,
index_scan_pct, scans_per_write, index_size, table_size
FROM index_groups;
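-- A confirmed-unused index can then be dropped without blocking reads or writes
-- (9.2+; the index name is illustrative):
DROP INDEX CONCURRENTLY index_name;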
-- Bloat
-- source: https://github.com/ioguix/pgsql-bloat-estimation
-- current_database: name of the current database.
-- schemaname: schema of the table.
-- tblname: the table name.
-- real_size: real size of the table.
-- extra_size: estimated extra size not used/needed in the table. This extra size is composed of fillfactor space, bloat and alignment padding.
-- extra_ratio: estimated ratio of the real size used by extra_size.
-- fillfactor: the fillfactor of the table.
-- bloat_size: estimated size of the bloat without the extra space kept for the fillfactor.
-- bloat_ratio: estimated ratio of the real size used by bloat_size.
-- is_na: is the estimation "Not Applicable"? If true, do not trust the stats.
SELECT current_database(), schemaname, tblname, bs*tblpages AS real_size,
(tblpages-est_tblpages)*bs AS extra_size,
CASE WHEN tblpages - est_tblpages > 0
THEN 100 * (tblpages - est_tblpages)/tblpages::float
ELSE 0
END AS extra_ratio, fillfactor, (tblpages-est_tblpages_ff)*bs AS bloat_size,
CASE WHEN tblpages - est_tblpages_ff > 0
THEN 100 * (tblpages - est_tblpages_ff)/tblpages::float
ELSE 0
END AS bloat_ratio, is_na
-- , (pst).free_percent + (pst).dead_tuple_percent AS real_frag
FROM (
SELECT ceil( reltuples / ( (bs-page_hdr)/tpl_size ) ) + ceil( toasttuples / 4 ) AS est_tblpages,
ceil( reltuples / ( (bs-page_hdr)*fillfactor/(tpl_size*100) ) ) + ceil( toasttuples / 4 ) AS est_tblpages_ff,
tblpages, fillfactor, bs, tblid, schemaname, tblname, heappages, toastpages, is_na
-- , stattuple.pgstattuple(tblid) AS pst
FROM (
SELECT
( 4 + tpl_hdr_size + tpl_data_size + (2*ma)
- CASE WHEN tpl_hdr_size%ma = 0 THEN ma ELSE tpl_hdr_size%ma END
- CASE WHEN ceil(tpl_data_size)::int%ma = 0 THEN ma ELSE ceil(tpl_data_size)::int%ma END
) AS tpl_size, bs - page_hdr AS size_per_block, (heappages + toastpages) AS tblpages, heappages,
toastpages, reltuples, toasttuples, bs, page_hdr, tblid, schemaname, tblname, fillfactor, is_na
FROM (
SELECT
tbl.oid AS tblid, ns.nspname AS schemaname, tbl.relname AS tblname, tbl.reltuples,
tbl.relpages AS heappages, coalesce(toast.relpages, 0) AS toastpages,
coalesce(toast.reltuples, 0) AS toasttuples,
coalesce(substring(
array_to_string(tbl.reloptions, ' ')
FROM '%fillfactor=#"__#"%' FOR '#')::smallint, 100) AS fillfactor,
current_setting('block_size')::numeric AS bs,
CASE WHEN version()~'mingw32' OR version()~'64-bit|x86_64|ppc64|ia64|amd64' THEN 8 ELSE 4 END AS ma,
24 AS page_hdr,
23 + CASE WHEN MAX(coalesce(null_frac,0)) > 0 THEN ( 7 + count(*) ) / 8 ELSE 0::int END
+ CASE WHEN tbl.relhasoids THEN 4 ELSE 0 END AS tpl_hdr_size,
sum( (1-coalesce(s.null_frac, 0)) * coalesce(s.avg_width, 1024) ) AS tpl_data_size,
bool_or(att.atttypid = 'pg_catalog.name'::regtype) AS is_na
FROM pg_attribute AS att
JOIN pg_class AS tbl ON att.attrelid = tbl.oid
JOIN pg_namespace AS ns ON ns.oid = tbl.relnamespace
JOIN pg_stats AS s ON s.schemaname=ns.nspname
AND s.tablename = tbl.relname AND s.inherited=false AND s.attname=att.attname
LEFT JOIN pg_class AS toast ON tbl.reltoastrelid = toast.oid
WHERE att.attnum > 0 AND NOT att.attisdropped
AND tbl.relkind = 'r'
GROUP BY 1,2,3,4,5,6,7,8,9,10, tbl.relhasoids
ORDER BY 2,3
) AS s
) AS s2
) AS s3;
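-- For exact rather than estimated numbers, the pgstattuple extension scans the
-- table (expensive on big tables; the commented (pst) hooks above use it too):
CREATE EXTENSION IF NOT EXISTS pgstattuple;
SELECT * FROM pgstattuple('table_name');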
-- Rows (tuples) per page
-- Use this on a frequently updated table to pick a fillfactor, i.e. one that leaves room for at least one extra row per page.
SELECT rolname AS owner
, nspname AS schemaname
, relname AS tablename
, relpages, reltuples, (reltuples::FLOAT / relpages::FLOAT) AS tuples_per_page
FROM pg_class
JOIN pg_namespace ON relnamespace = pg_namespace.oid
JOIN pg_roles ON relowner = pg_roles.oid
WHERE relkind = 'r' AND relpages > 20 AND reltuples > 1000
AND nspname != 'pg_catalog'
ORDER BY tuples_per_page;
-- Change the fill factor (applies to newly written pages; the VACUUM FULL rewrite applies it to existing ones)
ALTER TABLE table_name SET (fillfactor = 90);
VACUUM FULL table_name;
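-- Verify the storage parameter took effect (reloptions lists per-table settings):
SELECT relname, reloptions FROM pg_class WHERE relname = 'table_name';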
-- Number of rows HOT-updated in a table
SELECT pg_stat_get_tuples_hot_updated('table_name'::regclass::oid);
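-- The same counters per table with context, from the statistics collector;
-- a high hot_pct suggests the fillfactor is doing its job:
SELECT relname, n_tup_upd, n_tup_hot_upd,
round(100.0 * n_tup_hot_upd / greatest(n_tup_upd, 1), 2) AS hot_pct
FROM pg_stat_user_tables
ORDER BY n_tup_upd DESC
LIMIT 20;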