\dt -- SHOW TABLES
\dT+ -- SHOW TYPES
\du -- SHOW USERS
\d table -- SHOW INFO
CREATE ROLE watcher PASSWORD 'secure' NOSUPERUSER NOCREATEDB NOCREATEROLE INHERIT LOGIN;
ALTER USER postgres WITH PASSWORD 'secure';
GRANT USAGE ON SCHEMA public TO watcher;
GRANT SELECT ON ALL TABLES IN SCHEMA public TO watcher;
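-- Note (addition): GRANT ... ON ALL TABLES only covers tables that already exist;
-- one way to also cover tables created later in the schema is via default privileges:
ALTER DEFAULT PRIVILEGES IN SCHEMA public GRANT SELECT ON TABLES TO watcher;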
SELECT * FROM pg_stat_database; -- per-database statistics (cf. MySQL's SHOW PROCESSLIST)
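-- The closer analogue of MySQL's SHOW PROCESSLIST is pg_stat_activity, e.g.:
SELECT pid, usename, state, query FROM pg_stat_activity;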
SELECT setval('uploads_id_seq', (SELECT MAX(id) FROM uploads)); -- resync a sequence with its table
ALTER USER user_name WITH PASSWORD 'new_password';
GRANT ALL PRIVILEGES ON DATABASE piwik TO piwik;
REVOKE ALL PRIVILEGES ON DATABASE tilde FROM public;
GRANT ALL PRIVILEGES ON DATABASE tilde TO postgres;
SELECT pg_size_pretty(pg_database_size('DBNAME'));
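-- Likewise for a single table, including its indexes and TOAST data ('some_table' is a placeholder):
SELECT pg_size_pretty(pg_total_relation_size('some_table'));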
-- checksums matching all three of the listed topics
SELECT tags.checksum FROM tags
INNER JOIN topics ON topics.tid = tags.tid
WHERE topics.topic IN ('Ag', 'Al', 'As')
GROUP BY tags.checksum
HAVING COUNT(tags.tid) = 3;
-- random sampling via a random OFFSET
SELECT filenames FROM metadata OFFSET random() * 600000 LIMIT 10;
SELECT filenames FROM metadata m INNER JOIN ownerships o ON o.checksum = m.checksum
WHERE o.user_id = 125 OFFSET random() * 200000 LIMIT 10;
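-- Possible alternative (PostgreSQL 9.5+): TABLESAMPLE reads a random sample of pages,
-- which is usually cheaper than a large random OFFSET; SYSTEM (1) means roughly 1% of the table:
SELECT filenames FROM metadata TABLESAMPLE SYSTEM (1) LIMIT 10;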
SHOW ALL; -- list all runtime configuration parameters
-- JSONB indexes: expression index on a scalar field, GIN index on a nested array/object
CREATE INDEX idxfinished ON cards ((data->>'finished'));
CREATE INDEX idxgintags ON cards USING gin ((data->'tags'));
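-- Example queries these indexes can serve (a sketch; 'true' and 'qa' are made-up values):
SELECT * FROM cards WHERE data->>'finished' = 'true'; -- expression index idxfinished
SELECT * FROM cards WHERE data->'tags' ? 'qa';        -- GIN index idxgintags: 'tags' contains the key/element 'qa'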
-- per-table sizes: total, index, TOAST, and heap
SELECT *, pg_size_pretty(total_bytes) AS total
     , pg_size_pretty(index_bytes) AS index
     , pg_size_pretty(toast_bytes) AS toast
     , pg_size_pretty(table_bytes) AS table
FROM (
    SELECT *, total_bytes - index_bytes - COALESCE(toast_bytes, 0) AS table_bytes FROM (
        SELECT c.oid, nspname AS table_schema, relname AS table_name
             , c.reltuples AS row_estimate
             , pg_total_relation_size(c.oid) AS total_bytes
             , pg_indexes_size(c.oid) AS index_bytes
             , pg_total_relation_size(reltoastrelid) AS toast_bytes
        FROM pg_class c
        LEFT JOIN pg_namespace n ON n.oid = c.relnamespace
        WHERE relkind = 'r'
    ) a
) a;
-- sizes and usage of indexes
SELECT
    t.tablename,
    indexname,
    c.reltuples AS num_rows,
    pg_relation_size(quote_ident(t.tablename)::text) AS table_size,
    pg_relation_size(quote_ident(indexrelname)::text) AS index_size,
    CASE WHEN indisunique THEN 'Y' ELSE 'N' END AS unique,
    idx_scan AS number_of_scans,
    idx_tup_read AS tuples_read,
    idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_class c ON t.tablename = c.relname
LEFT OUTER JOIN (
    SELECT c.relname AS ctablename, ipg.relname AS indexname, x.indnatts AS number_of_columns,
           idx_scan, idx_tup_read, idx_tup_fetch, indexrelname, indisunique
    FROM pg_index x
    JOIN pg_class c ON c.oid = x.indrelid
    JOIN pg_class ipg ON ipg.oid = x.indexrelid
    JOIN pg_stat_all_indexes psai ON x.indexrelid = psai.indexrelid
) AS foo ON t.tablename = foo.ctablename
WHERE t.schemaname = 'public'
ORDER BY 5;
-- unused indexes
-- SELECT pg_stat_reset();
SELECT
    idstat.relname AS table_name,
    indexrelname AS index_name,
    idstat.idx_scan AS times_used,
    pg_size_pretty(pg_relation_size(idstat.relname::text)) AS table_size,
    pg_size_pretty(pg_relation_size(indexrelname::text)) AS index_size,
    n_tup_upd + n_tup_ins + n_tup_del AS num_writes,
    indexdef AS definition
FROM pg_stat_user_indexes AS idstat
JOIN pg_indexes ON indexrelname = indexname
JOIN pg_stat_user_tables AS tabstat ON idstat.relname = tabstat.relname
WHERE idstat.idx_scan < 200
  AND indexdef !~* 'unique'
ORDER BY idstat.relname, indexrelname;
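-- Once an index reported above is confirmed unused, it can be dropped without locking
-- out writes (index name here is hypothetical):
DROP INDEX CONCURRENTLY IF EXISTS some_unused_index;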
-- JSON field sanity check: phase diagrams whose 'naxes' is neither 2 nor 3
SELECT c.value->'naxes' FROM phase_diagrams c WHERE (c.value->'naxes')::text::int NOT IN (2, 3);
-- AiiDA recursive transitive closure (walk db_dblink links down and up from node 9999)
WITH RECURSIVE path_down(input_id, output_id, depth, path) AS (
    SELECT link.input_id, link.output_id, 0::INT AS depth,
           (link.input_id || '->' || link.output_id::TEXT) AS path
    FROM db_dblink AS link WHERE link.input_id = 9999
    UNION ALL
    SELECT existing.input_id, newlink.output_id, existing.depth + 1 AS depth,
           (existing.path || '->' || newlink.output_id::TEXT)
    FROM path_down AS existing, db_dblink AS newlink
    WHERE newlink.input_id = existing.output_id
)
SELECT * FROM path_down AS path
ORDER BY path.depth, path.output_id ASC;

WITH RECURSIVE path_up(input_id, output_id, depth, path) AS (
    SELECT link.input_id, link.output_id, 0::INT AS depth,
           (link.input_id || '->' || link.output_id::TEXT) AS path
    FROM db_dblink AS link WHERE link.output_id = 9999
    UNION ALL
    SELECT newlink.input_id, existing.output_id, existing.depth + 1 AS depth,
           (newlink.input_id::TEXT || '->' || existing.path)
    FROM path_up AS existing, db_dblink AS newlink
    WHERE newlink.output_id = existing.input_id
)
SELECT * FROM path_up AS path
ORDER BY path.depth, path.input_id ASC;
-- CSV import (server-side; table, columns, and file path are placeholders)
COPY some_table(f1, f2) FROM 'some.csv' DELIMITER ',' CSV;
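-- Client-side variant via psql: \copy reads the file on the client instead of the server
-- (same placeholder names as above):
\copy some_table(f1, f2) FROM 'some.csv' DELIMITER ',' CSV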
-- substring comparison (e.g. in a WHERE clause): first character of entry
SUBSTRING(entry FROM 1 FOR 1) = 'P'
-- NULL vs. empty strings: the first query matches NULL or '', the second only non-empty values
SELECT * FROM b_entries WHERE COALESCE(document, '') = '';
SELECT * FROM b_entries WHERE document <> '';
-- dump by condition
COPY (SELECT * FROM entries WHERE type = 1) TO STDOUT;