Skip to content

Instantly share code, notes, and snippets.

@vinnix
Last active January 14, 2022 13:38
Show Gist options
  • Save vinnix/056f6f4169fcdc8d13732d0e6fdffe22 to your computer and use it in GitHub Desktop.
Save vinnix/056f6f4169fcdc8d13732d0e6fdffe22 to your computer and use it in GitHub Desktop.
PostgreSQL usual monitoring and functions commands/queries/functions -- keep it safe on your heart <3

PostgreSQL Cheat Sheet

Not necessarily a cheat sheet only, but also a history of research and nice-to-have commands. I like to leave the complete URL address to make it easier to copy, paste, and share with others when required.

  • Database ages:
 SELECT 
     oid
    ,datname
    , age(datfrozenxid)
    , current_setting('autovacuum_freeze_max_age')
FROM pg_database
ORDER BY 2 DESC;
  • Indexes and its types
SELECT tab.relname, cls.relname, am.amname
FROM pg_index idx 
JOIN pg_class cls ON cls.oid=idx.indexrelid
JOIN pg_class tab ON tab.oid=idx.indrelid
JOIN pg_am am ON am.oid=cls.relam;
  • What is causing age to increase
-- What is causing transaction-ID age to increase: find the oldest xmin/xid
-- holders (long-running xacts, prepared xacts, replication slots, standbys)
-- and how much headroom each has before the 2^31 wraparound limit.
WITH q AS (
SELECT
  (SELECT max(age(backend_xmin))
      FROM pg_stat_activity  WHERE state != 'idle' )       AS oldest_running_xact_age,
  (SELECT max(age(transaction)) FROM pg_prepared_xacts)    AS oldest_prepared_xact_age,
  (SELECT max(age(xmin)) FROM pg_replication_slots)        AS oldest_replication_slot_age,
  (SELECT max(age(catalog_xmin)) FROM pg_replication_slots)        AS oldest_replication_slot_catalog_xmin_age,
  (SELECT max(age(backend_xmin)) FROM pg_stat_replication) AS oldest_replica_xact_age
)
SELECT *,
       2^31 - oldest_running_xact_age AS oldest_running_xact_left,
       2^31 - oldest_prepared_xact_age AS oldest_prepared_xact_left,
       2^31 - oldest_replication_slot_age AS oldest_replication_slot_left,
       -- fixed: original read "ASoldest_..." (missing space after AS), a syntax error
       2^31 - oldest_replication_slot_catalog_xmin_age AS oldest_replication_slot_catalog_xmin_left,
       2^31 - oldest_replica_xact_age AS oldest_replica_xact_left
FROM q;
  • Building for debug:
./configure --enable-debug --enable-cassert CFLAGS="-Wuninitialized -ggdb -O0 -g3 -fno-omit-frame-pointer" CPPFLAGS='-DWAL_DEBUG' --enable-tap-tests --with-openssl  --prefix=/home/postgres/pgsql13.3
  • Exporting all settings:
copy (select * from pg_settings ) TO '/tmp/all_settings.csv' WITH (FORMAT csv, HEADER true);
  • Monitoring I/O from catalog
-- perform a "select pg_stat_reset();" when you want to reset counter statistics
-- Disk vs. cache hits per table (plus an 'all' summary row) from pg_statio.
-- fixed: added the missing terminating semicolon (the next line was prose).
with 
all_tables as
(
-- summary row: totals across every table, labelled 'all'
SELECT  *
FROM    (
    SELECT  'all'::text as table_name, 
        sum( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk, 
        sum( (coalesce(heap_blks_hit,0)  + coalesce(idx_blks_hit,0)  + coalesce(toast_blks_hit,0)  + coalesce(tidx_blks_hit,0))  ) as from_cache    
    FROM    pg_statio_all_tables  --> change to pg_statio_USER_tables if you want to check only user tables (excluding postgres's own tables)
    ) a
WHERE   (from_disk + from_cache) > 0 -- discard tables without hits
),
tables as 
(
-- per-table rows: same read/hit accounting, one row per relation
SELECT  *
FROM    (
    SELECT  relname as table_name, 
        ( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk, 
        ( (coalesce(heap_blks_hit,0)  + coalesce(idx_blks_hit,0)  + coalesce(toast_blks_hit,0)  + coalesce(tidx_blks_hit,0))  ) as from_cache    
    FROM    pg_statio_all_tables --> change to pg_statio_USER_tables if you want to check only user tables (excluding postgres's own tables)
    ) a
WHERE   (from_disk + from_cache) > 0 -- discard tables without hits
)
SELECT  table_name as "table name",
    from_disk as "disk hits",
    round((from_disk::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% disk hits",
    round((from_cache::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% cache hits",
    (from_disk + from_cache) as "total hits"
FROM    (SELECT * FROM all_tables UNION ALL SELECT * FROM tables) a
ORDER   BY (case when table_name = 'all' then 0 else 1 end), from_disk desc;

ref.

  • Track the percentage of hot-updates:
-- Track the percentage of HOT (heap-only tuple) updates per table.
-- fixed: n_tup_hot_upd is a SUBSET of n_tup_upd in pg_stat_all_tables, so the
-- original denominator (n_tup_hot_upd + n_tup_upd) double-counted HOT updates;
-- the correct ratio is n_tup_hot_upd / n_tup_upd. NULLIF guards division by zero.
WITH t as (SELECT  relname, n_tup_upd, n_tup_hot_upd, n_dead_tup, n_live_tup,
                   round((n_tup_hot_upd::numeric / nullif(n_tup_upd, 0)::numeric)*100.0,2) as perc_hot
              FROM pg_stat_all_tables
)
SELECT  relname, n_tup_upd, n_tup_hot_upd, n_dead_tup, n_live_tup, perc_hot as "% HOT"
  FROM t
 WHERE  perc_hot < 80
  ORDER BY n_live_tup desc limit 30;
select  name, (now() at time zone ptn.name )::timestamp as now_over_there from pg_timezone_names ptn order by 2 asc ;
select * from pg_control_checkpoint()
         t (  checkpoint_lsn
            , redo_lsn
            , redo_wal_file
            , timeline_id
            , prev_timeline_id
            , full_page_writes
            , next_xid
            , next_oid
            , next_multixact_id
            , next_multi_offset
            , oldest_xid
            , oldest_xid_dbid
            , oldest_active_xid
            , oldest_multi_xid
            , oldest_multi_dbid
            , oldest_commit_ts_xid
            , newest_commit_ts_xid
            , checkpoint_time );

-- https://dba.stackexchange.com/questions/167106/calculate-storage-size-of-rows?noredirect=1&lq=1


-- Bulding PostgreSQL from Source:


-- please note: if you are trying to do it, creating directories, users, groups -- should not be a problem. stay safe!

-- get your CentOS running: -- https://aws.amazon.com/marketplace/pp/B00O7WM7QW

yum -y install epel-release
yum install scl-utils.x86_64 centos-release-scl-rh.noarch 
yum install llvm-toolset-7.x86_64 llvm-toolset-7-build.x86_64 llvm-toolset-7-clang.x86_64 llvm-toolset-7-clang-devel.x86_64 llvm-toolset-7-clang-libs.x86_64  llvm-toolset-7-clang-tools-extra.x86_64 llvm-toolset-7-cmake.x86_64  llvm-toolset-7-cmake-data.noarch  llvm-toolset-7-cmake-doc.noarch llvm-toolset-7-compiler-rt.x86_64 llvm-toolset-7-libomp.x86_64 llvm-toolset-7-lldb.x86_64  llvm-toolset-7-lldb-devel.x86_64  llvm-toolset-7-llvm.x86_64  llvm-toolset-7-llvm-devel.x86_64  llvm-toolset-7-llvm-doc.noarch  llvm-toolset-7-llvm-libs.x86_64  llvm-toolset-7-llvm-static.x86_64   llvm-toolset-7-python2-lit.noarch  llvm-toolset-7-runtime.x86_64   

-- pre-requirements:
yum install bison.x86_64 flex.x86_64 openssl.x86_64 openssl-devel.x86_64 openssl-libs.x86_64 readline.x86_64 readline-devel.x86_64 zlib.x86_64 zlib-devel.x86_64 git.x86_64

scl enable llvm-toolset-7 bash
-- if coming from an old build, run 'make maintainer-clean'

mkdir ~/Sources/
cd ~/Sources
git clone https://github.com/postgres/postgres/
cd ~/Sources/postgres 

-- build PG
mkdir /home/postgres/pgsql-HEAD/
mkdir /home/postgres/pgsql-HEAD/data
chmod go-rwx /home/postgres/pgsql-HEAD/data
CXX=clang CFLAGS="-O0" CC=clang LLVM_CONFIG=llvm-config ./configure --prefix=/home/postgres/pgsql-HEAD/ --with-openssl --with-llvm --enable-debug --enable-dtrace --enable-cassert --enable-profiling
make -j3
make install 
/var/lib/pgsql/HEAD-test/pgsql_head/bin/initdb -k -D /home/postgres/pgsql-HEAD/data
/var/lib/pgsql/HEAD-test/pgsql_head/bin/pg_basebackup -R -h localhost -U user4replication -p 5433 -D /home/postgres/pgsql-HEAD/DATA_target -P -v
sed -e 's/^port.*/port = 5432/' /home/postgres/pgsql-HEAD/DATA_target/postgresql.conf > /home/postgres/pgsql-HEAD/DATA_target/postgresql.conf.test

-- Engine up-time and last start time


SELECT date_trunc('second', current_timestamp - pg_postmaster_start_time()) as uptime;
SELECT pg_postmaster_start_time();

--


https://dba.stackexchange.com/questions/129301/how-exactly-the-row-visibility-is-determined


--


-- Top 50 relations by relfrozenxid age, with % of autovacuum_freeze_max_age
-- consumed and % of the hard shutdown limit, plus a ready-made VACUUM command.
with relage as (
-- one row per heap/toast/matview relation with its xid age and approx size in MB
-- (relpages is 8 kB pages, so relpages/128 = MB)
select c.oid, relname, age(relfrozenxid) as xid_age,
    round((relpages/128::numeric),1) as mb_size,
    n.nspname as schema_name
    from pg_class c,
         pg_namespace n
where c.relnamespace = n.oid AND relkind IN ('r', 't','m')
),
av_max_age as (
    select setting::numeric as max_age from pg_settings where name = 'autovacuum_freeze_max_age'
),
wrap_pct AS (
-- av_wrap_pct: % of the autovacuum trigger threshold already consumed
-- shutdown_pct: % of ~2.2bn (close to the wraparound stop limit) consumed
select oid, relname, schema_name, xid_age,
    round(xid_age*100::numeric/max_age, 1) as av_wrap_pct,
    round(xid_age*100::numeric/2200000000, 1) as shutdown_pct,
    mb_size
from relage cross join av_max_age
)
select wrap_pct.*,  ' vacuum freeze verbose '||schema_name||'.'||relname||';' as vacuum_cmd
from wrap_pct
order by xid_age desc
limit 50;


with relage as (
 select  c.relnamespace
        ,s.nspname
        ,c.relname, age(c.relfrozenxid) as xid_age
        ,round((c.relpages/128::numeric),1) as mb_size
  from pg_class c, pg_namespace s where c.relnamespace = s.oid
   and  c.relkind IN ('r', 't','m')
),
av_max_age as (
    select setting::numeric as max_age from pg_settings where name = 'autovacuum_freeze_max_age'
),
wrap_pct AS (
select relnamespace,nspname, relname, xid_age,
    round(xid_age*100::numeric/max_age, 1) as av_wrap_pct,
    round(xid_age*100::numeric/2200000000, 1) as shutdown_pct,
    mb_size
from relage cross join av_max_age
)
select wrap_pct.*, ' vacuum freeze verbose '||nspname||'.'||relname||';' as vacuum_cmd
from wrap_pct
where ((av_wrap_pct >= 75
    or shutdown_pct >= 50)
    and mb_size > 1000)
    or
    (av_wrap_pct > 100
    or shutdown_pct > 80)
order by mb_size asc limit 10;

-- generating 2 bn rows, with multiple columns, primary key


-- remove the current table

-- fixed: "DROP TABL3" typo (syntax error); IF EXISTS makes the script idempotent
BEGIN ; 
   DROP TABLE IF EXISTS my_new_table ;
COMMIT; 


-- create a table with fake data for tests
BEGIN; 
    -- build a 2M-row test table: sequential id, a random number,
    -- an md5 text value, and a timestamp walking back one day per row
    CREATE TABLE my_new_table AS 
    select * from 
    (
        -- "values" works unquoted as a CTE name in PostgreSQL despite being a keyword
        WITH values AS 
        (
           SELECT * FROM generate_series(1, 2000000) as t(g)
        ) 
        SELECT 
            g as id
           ,g * random() as rand_number
           ,md5(g::text) as md5_value
           ,current_timestamp - make_interval(days := g) as dt_when
            -- add more columns here 
          FROM values
     ) as sub_gen; 
COMMIT;

-- if needed, add more data
-- Append another 2M generated rows (same shape as the CREATE TABLE AS above).
-- NOTE: ids will duplicate the existing ones — run before creating the unique index.
-- fixed: added the missing terminating semicolon.
INSERT INTO my_new_table 
    select * from 
    (
        WITH values AS 
        (
           SELECT * FROM generate_series(1, 2000000) as t(g)
        ) 
        SELECT 
            g as id
           ,g * random() as rand_number
           ,md5(g::text) as md5_value
           ,current_timestamp - make_interval(days := g) as dt_when
            
          FROM values
     ) as sub_gen;

-- create an unique index to use as primary key in the future

CREATE UNIQUE INDEX CONCURRENTLY idx_unique_id ON my_new_table (id);

BEGIN;  -- fixed: missing semicolon after BEGIN
-- associate the table's unique index as its primary key
   ALTER TABLE my_new_table ADD PRIMARY KEY USING INDEX idx_unique_id;
COMMIT;

--


-- (1) https://www.postgresql.org/docs/current/static/functions-admin.html#FUNCTIONS-RECOVERY-CONTROL

SELECT EXTRACT(EPOCH FROM (now() - pg_last_xact_replay_timestamp()))::INT;

-- Replica-side lag in seconds; 0 when fully caught up (receive == replay).
-- NOTE(review): pg_last_xlog_receive_location()/pg_last_xlog_replay_location()
-- were renamed to pg_last_wal_receive_lsn()/pg_last_wal_replay_lsn() in PG 10+.
SELECT
CASE
WHEN pg_last_xlog_receive_location() = pg_last_xlog_replay_location() THEN 0
ELSE EXTRACT (EPOCH FROM now() - pg_last_xact_replay_timestamp())::INTEGER
END
AS replication_lag;
	      

-- Master:
SELECT pg_current_xlog_location();

-- Slave (X times, to track progression):

SELECT pg_last_xlog_receive_location();

-- Master:

SELECT * FROM pg_stat_replication; 
	      

```bash
ps aux | egrep 'wal\sreceiver'
postgres  3858  0.0  0.0 2100112 3312 ?        Ss   19:35   0:01 postgres:
wal receiver process   streaming 36/900A738

--


https://dba.stackexchange.com/questions/103421/the-cache-hit-rate-using-pg-statio-user-tables

-- perform a "select pg_stat_reset();" when you want to reset counter statistics

WITH 
	all_tables AS
	(
	SELECT  *
	FROM    (
	    SELECT  'all'::text as table_name, 
	        sum( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk, 
	        sum( (coalesce(heap_blks_hit,0)  + coalesce(idx_blks_hit,0)  + coalesce(toast_blks_hit,0)  + coalesce(tidx_blks_hit,0))  ) as from_cache    
	    FROM    pg_statio_all_tables  --> change to pg_statio_USER_tables if you want to check only user tables (excluding postgres's own tables)
	    ) a
	WHERE   (from_disk + from_cache) > 0 -- discard tables without hits
	),
	tables as 
	(
	SELECT  *
	FROM    (
	    SELECT  relname as table_name, 
	        ( (coalesce(heap_blks_read,0) + coalesce(idx_blks_read,0) + coalesce(toast_blks_read,0) + coalesce(tidx_blks_read,0)) ) as from_disk, 
	        ( (coalesce(heap_blks_hit,0)  + coalesce(idx_blks_hit,0)  + coalesce(toast_blks_hit,0)  + coalesce(tidx_blks_hit,0))  ) as from_cache    
	    FROM    pg_statio_all_tables --> change to pg_statio_USER_tables if you want to check only user tables (excluding postgres's own tables)
	    ) a
	WHERE   (from_disk + from_cache) > 0 -- discard tables without hits
	)
	SELECT  table_name as "table name",
	    from_disk as "disk hits",
	    round((from_disk::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% disk hits",
	    round((from_cache::numeric / (from_disk + from_cache)::numeric)*100.0,2) as "% cache hits",
	    (from_disk + from_cache) as "total hits"
	FROM    (SELECT * FROM all_tables UNION ALL SELECT * FROM tables) a
	ORDER   BY (case when table_name = 'all' then 0 else 1 end), from_disk desc ; 
	
	
--- ---------------------------	
CREATE EXTENSION pg_buffercache; 

	-- then the view 'pg_buffercache' will be available;
	-- an aggregation is possible by schema or table
	-- buffer_percent: share of shared_buffers held by the relation
	-- percent_of_relation: share of the relation currently cached
	SELECT   n.nspname as schema
	        ,c.relname as table
	        ,count(*) AS buffers_count
	        ,pg_size_pretty(count(*) * 8192) as buffered_size
	        ,round(100.0 * count(*) / ( select setting from pg_settings where name='shared_buffers')::integer,3) as buffer_percent
	        ,round(100.0*count(*)*8192 / pg_table_size(c.oid),1) as percent_of_relation
	  FROM  pg_buffercache b 
	       ,pg_class c
	       ,pg_namespace n 
	  WHERE c.relnamespace = n.oid
	    AND b.relfilenode = c.relfilenode
	    -- reldatabase 0 = shared catalogs; also match the current database only
	    AND b.reldatabase IN (0, (SELECT oid FROM pg_database WHERE datname = current_database()))
	GROUP BY c.oid
	    , n.nspname
	    , c.relname
	 ORDER BY buffers_count DESC
	 LIMIT 10;

-- cache hit rates (should not be less than 0.99)

-- Heap cache hit ratio (should not be less than 0.99).
-- fixed: the original computed (hit - read) / hit with INTEGER division, which
-- truncates to 0 or 1 and divides by zero after a stats reset; the conventional
-- ratio is hits / (hits + reads), computed in numeric with a NULLIF guard.
SELECT sum(heap_blks_read) as heap_read,
       sum(heap_blks_hit)  as heap_hit,
       sum(heap_blks_hit)::numeric / nullif(sum(heap_blks_hit) + sum(heap_blks_read), 0) as ratio
FROM pg_statio_user_tables; 

-- table index usage rates (should not be less than 0.99)

-- Per-table index usage rate (should not be less than 99%).
-- fixed: NULLIF guards division by zero for tables never scanned at all.
SELECT relname,
       100 * idx_scan / nullif(seq_scan + idx_scan, 0) AS percent_of_times_index_used,
       n_live_tup AS rows_in_table
FROM pg_stat_user_tables 
ORDER BY n_live_tup DESC; 

-- how many indexes are in cache

-- Index cache hit ratio.
-- fixed: same defects as the heap version — integer division truncated the
-- ratio and sum(idx_blks_hit)=0 raised division by zero; use hits/(hits+reads).
SELECT sum(idx_blks_read) as idx_read,
       sum(idx_blks_hit)  as idx_hit,
       sum(idx_blks_hit)::numeric / nullif(sum(idx_blks_hit) + sum(idx_blks_read), 0) as ratio
FROM pg_statio_user_indexes;

-- database buffer cache hit ratio (shared_buffers)
-- fixed: the original divided blks_read by blk_read_time (a block COUNT by
-- milliseconds of read time), which is not a hit ratio; also added the
-- missing semicolon and a NULLIF guard for freshly-reset statistics.
select sum(blks_hit)::numeric / nullif(sum(blks_hit) + sum(blks_read), 0) as cache_hit_ratio
from pg_stat_database;
-- Demo function (note: local "i" shadows the parameter "i").
-- fixed: a bare SELECT inside plpgsql raises "query has no destination for
-- result data"; PERFORM is the correct way to run a query and discard it.
CREATE FUNCTION ff(i int) returns void as $$ declare i int;  begin perform now(); end; $$ language plpgsql ;
select * from pg_db_role_setting  ; 

SELECT now() - pg_postmaster_start_time();

SELECT pg_postmaster_start_time(); 

select relname from pg_class where reltoastrelid = ( select oid from pg_class where relname = 'toast_table_name' );

-- ## RTFM:

-- > -- https://www.postgresql.org/docs/current/static/functions-info.html -- https://www.postgresql.org/docs/current/static/functions-admin.html#FUNCTIONS-ADMIN-DBSIZE -- https://www.postgresql.org/docs/current/static/functions-admin.html (check it again) -- https://www.postgresql.org/docs/current/static/datatype-datetime.html -- https://www.postgresql.org/docs/current/static/runtime-config-logging.html -- https://www.postgresql.org/docs/current/static/functions-formatting.html (to_char(), etc.. ) -- https://www.postgresql.org/docs/current/static/runtime-config-autovacuum.html -- https://www.postgresql.org/docs/current/static/runtime-config-query.html -- https://www.postgresql.org/docs/current/static/datatype-oid.html -- https://www.postgresql.org/docs/current/static/runtime-config-resource.html#GUC-WORK-MEM -- https://www.postgresql.org/docs/current/static/sql-discard.html

-- https://www.postgresql.org/docs/current/static/protocol-flow.html#PROTOCOL-FLOW-EXT-QUERY (protocol)

-- https://www.postgresql.org/docs/current/static/monitoring-stats.html -- https://www.postgresql.org/docs/9.6/static/monitoring-stats.html#PG-STAT-DATABASE-VIEW -- https://wiki.postgresql.org/wiki/COPY

-- https://www.postgresql.org/docs/current/static/performance-tips.html -- https://www.postgresql.org/docs/current/static/auto-explain.html -- https://www.postgresql.org/docs/current/static/protocol-flow.html

-- https://wiki.postgresql.org/wiki/Tuning_Your_PostgreSQL_Server

-- ## INDEXES:

-- https://www.postgresql.org/docs/current/static/gist-intro.html -- https://www.postgresql.org/docs/current/static/indexes-partial.html -- https://www.postgresql.org/docs/current/static/indexes-bitmap-scans.html -- https://www.postgresql.org/docs/current/static/indexes-ordering.html -- https://www.postgresql.org/docs/current/static/indexes-partial.html

-- ## TODO:

-- http://postgres.cz/wiki/PostgreSQL_SQL_Tricks -- https://doxygen.postgresql.org/snapbuild_8c_source.html

-- https://www.postgresql.org/docs/current/static/queries-with.html -- http://www.postgresqltutorial.com/postgresql-cast/ #cast,

-- http://www.sqlinjectionwiki.com/Categories/4/postgresql-sql-injection-cheat-sheet/

-- http://postgres.cz/wiki/PostgreSQL_SQL_Tricks_II

-- ## SCHEMA DESIGN:

-- ## PostGIS:

-- http://postgis.refractions.net/docs/using_postgis_dbmanagement.html#gist_indexes -- http://revenant.ca/www/postgis/workshop/indexing.html


-- Linux / Related


-- https://access.redhat.com/solutions/29894 # No left space


$(top -b -c -n 1 | egrep -e " R | D " | grep -v grep && uptime )


-- https://wiki.postgresql.org/wiki/Monitoring


SELECT nspname || '.' || relname AS "relation",
       pg_size_pretty(pg_relation_size(C.oid)) AS "size"
  FROM pg_class C
  LEFT JOIN pg_namespace N ON (N.oid = C.relnamespace)
 WHERE nspname NOT IN ('pg_catalog', 'information_schema')
 ORDER BY pg_relation_size(C.oid) DESC
 LIMIT 2;
SELECT n.nspname, c.relname
FROM   pg_catalog.pg_class c, pg_catalog.pg_namespace n, 
       pg_catalog.pg_index i 
WHERE  (i.indisvalid = false OR i.indisready = false) AND
       i.indexrelid = c.oid AND c.relnamespace = n.oid AND
       n.nspname != 'pg_catalog' AND 
       n.nspname != 'information_schema' AND 
       n.nspname != 'pg_toast' ; 

-- https://wiki.postgresql.org/wiki/Lock_Monitoring


-- https://www.postgresql.org/docs/current/static/view-pg-locks.html -- https://www.postgresql.org/docs/current/static/mvcc.html

  -- Who is blocking whom: pairs each waiting (not-granted) lock with the
  -- session holding a conflicting lock on the same object, showing both queries.
  SELECT blocked_locks.pid     AS blocked_pid,
         blocked_activity.usename  AS blocked_user,
         blocking_locks.pid     AS blocking_pid,
         blocking_activity.usename AS blocking_user,
         blocked_activity.query    AS blocked_statement,
         blocking_activity.query   AS current_statement_in_blocking_process
   FROM  pg_catalog.pg_locks         blocked_locks
    JOIN pg_catalog.pg_stat_activity blocked_activity  ON blocked_activity.pid = blocked_locks.pid
    JOIN pg_catalog.pg_locks         blocking_locks 
        -- match on every lock-target column; IS NOT DISTINCT FROM treats two
        -- NULLs as equal (plain = would drop rows, since most of these are NULL
        -- for any given locktype)
        ON blocking_locks.locktype = blocked_locks.locktype
        AND blocking_locks.DATABASE IS NOT DISTINCT FROM blocked_locks.DATABASE
        AND blocking_locks.relation IS NOT DISTINCT FROM blocked_locks.relation
        AND blocking_locks.page IS NOT DISTINCT FROM blocked_locks.page
        AND blocking_locks.tuple IS NOT DISTINCT FROM blocked_locks.tuple
        AND blocking_locks.virtualxid IS NOT DISTINCT FROM blocked_locks.virtualxid
        AND blocking_locks.transactionid IS NOT DISTINCT FROM blocked_locks.transactionid
        AND blocking_locks.classid IS NOT DISTINCT FROM blocked_locks.classid
        AND blocking_locks.objid IS NOT DISTINCT FROM blocked_locks.objid
        AND blocking_locks.objsubid IS NOT DISTINCT FROM blocked_locks.objsubid
        AND blocking_locks.pid != blocked_locks.pid
    JOIN pg_catalog.pg_stat_activity blocking_activity ON blocking_activity.pid = blocking_locks.pid
   WHERE NOT blocked_locks.GRANTED;

-- PG_CLASS


-- pg_toast

-- Pair every relation with its TOAST table (relations without TOAST excluded).
-- The original used LEFT JOIN ... WHERE c2.oid IS NOT NULL, which is exactly
-- an INNER JOIN, written here explicitly.
select c1.oid
      ,c1.*
      ,c2.oid
      ,c2.*
  from pg_class c1
 inner join pg_class c2
    on c1.reltoastrelid = c2.oid;

-- Checking users/roles


	-- Roles overview (roughly what psql's \du shows): attributes, limits,
	-- and the list of roles each role is a member of.
	SELECT r.rolname, r.rolsuper, r.rolinherit,
	  r.rolcreaterole, r.rolcreatedb, r.rolcanlogin,
	  r.rolconnlimit, r.rolvaliduntil,
	  -- memberof: every role this role belongs to, via pg_auth_members
	  ARRAY(SELECT b.rolname
			FROM pg_catalog.pg_auth_members m
			JOIN pg_catalog.pg_roles b ON (m.roleid = b.oid)
			WHERE m.member = r.oid) as memberof
	, r.rolreplication
	, r.rolbypassrls
	FROM pg_catalog.pg_roles r
	ORDER BY 1;	

-- Inner Links: -- https://gist.github.com/vinnix/3a9dea78f898079488061bea9d01aa3c (The vacuum experience - part1)


=> show rds.logical_replication ; DEBUG: StartTransactionCommand DEBUG: StartTransaction DEBUG: name: unnamed; blockState: DEFAULT; state: INPROGR, xid/subid/cid: 0/1/0, nestlvl: 1, children: DEBUG: ProcessUtility DEBUG: CommitTransactionCommand DEBUG: CommitTransaction DEBUG: name: unnamed; blockState: STARTED; state: INPROGR, xid/subid/cid: 0/1/0, nestlvl: 1, children: rds.logical_replication

0 (1 row)

ALTER TABLE public.a SET (autovacuum_vacuum_scale_factor = 0.05, autovacuum_analyze_scale_factor = 0.02);
=>\x
=> select * from pg_class where relname = 'a'; 
-[ RECORD 1 ]-------+---------------------------------------------------------------------------
relname             | a
relnamespace        | 16651
reltype             | 24580
reloftype           | 0
relowner            | 16386
relam               | 0
relfilenode         | 24578
reltablespace       | 0
relpages            | 0
reltuples           | 0
relallvisible       | 0
reltoastrelid       | 24581
relhasindex         | f
relisshared         | f
relpersistence      | p
relkind             | r
relnatts            | 3
relchecks           | 0
relhasoids          | f
relhaspkey          | f
relhasrules         | f
relhastriggers      | f
relhassubclass      | f
relrowsecurity      | f
relforcerowsecurity | f
relispopulated      | t
relreplident        | d
relfrozenxid        | 15390
relminmxid          | 1
relacl              | 
reloptions          | {autovacuum_vacuum_scale_factor=0.05,autovacuum_analyze_scale_factor=0.02}

-- ----------------------------------------------------------------------------------------------------------------
--
-- ----------------------------------------------------------------------------------------------------------------

```sql
select relname, reloptions from pg_class where 'autovacuum_enabled=false' = ANY (reloptions);

-- Tracking tables and materialized views age


    -- Age of every table/matview AND its TOAST table (LEFT JOIN: toast_age is
    -- NULL for relations without TOAST), oldest first — vacuum-freeze candidates.
    SELECT s.nspname
          ,c.oid::regclass as table_name
          ,age(c.relfrozenxid) as table_age
          ,age(t.relfrozenxid) as toast_age
FROM pg_namespace s
 INNER JOIN pg_class c on (c.relnamespace = s.oid)
LEFT JOIN pg_class t ON (c.reltoastrelid = t.oid)
WHERE c.relkind IN ('r', 'm')
 order by table_age desc, toast_age desc;

ALTER TABLE novell.workload_cost SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE novell.workload_cost SET (autovacuum_vacuum_threshold = 1000);
ALTER TABLE novell.workload_cost SET (autovacuum_analyze_scale_factor = 0.0);
ALTER TABLE novell.workload_cost SET (autovacuum_analyze_threshold = 1000);

               SELECT pgs.attname, pgs.n_distinct, pgs.*
		       array_to_string(pgs.most_common_vals, E'\n') as most_common_vals
		FROM pg_stats pgs
		WHERE pgs.tablename IN ('workload_cost') ;

		SELECT * 
		  FROM pg_class
		 WHERE relname IN ('workload_cost') ;

		SELECT * 
		  FROM pg_stat_user_tables 
		  WHERE relname IN ('workload_cost');

		 SELECT *
		   FROM pg_stat_user_indexes
		  WHERE relname IN ('workload_cost') ;

-- NOTE(review): this targets PostgreSQL <= 9.1 — procpid, current_query and
-- waiting were renamed/removed in 9.2 (pid, query, wait_event*); it will not
-- run on modern servers as written.
select ac.procpid, ac.current_query from (
select  sa.datid
           ,sa.datname
           ,sa.procpid
           ,sa.usesysid
           ,sa.usename
           ,substring(sa.current_query from 1 for 50) current_query_
           ,sa.waiting
       ,age(sa.xact_start) age_xact_start
       ,age(sa.query_start) age_query_start
       ,age(sa.backend_start) age_backend_start
           ,sa.client_addr
           ,sa.client_port      
  from  pg_stat_activity sa
 -- tautology kept as a placeholder: swap in a literal address to filter
 where sa.client_addr =  sa.client_addr -- '10.250.82.33'
 order by  sa.backend_start
                  ,sa.xact_start desc
                  ,sa.query_start desc
) as stats 
, pg_stat_activity ac
where stats.procpid = ac.procpid;

--


select
    sum(pg_column_size(the_text_column)) as total_size,
    avg(pg_column_size(the_text_column)) as average_size,
    sum(pg_column_size(the_text_column)) * 100.0 / pg_relation_size('t') as percentage
from t;

--


-- show running queries (pre 9.2)

SELECT procpid, age(query_start, clock_timestamp()), usename, current_query 
FROM pg_stat_activity 
WHERE current_query != '<IDLE>' AND current_query NOT ILIKE '%pg_stat_activity%' 
ORDER BY query_start desc;

-- show running queries (9.2)
```sql
SELECT pid, age(query_start, clock_timestamp()), usename, query 
FROM pg_stat_activity 
WHERE query != '<IDLE>' AND query NOT ILIKE '%pg_stat_activity%' 
ORDER BY query_start desc;

-- kill running query

SELECT pg_cancel_backend(procpid);

-- kill idle query

SELECT pg_terminate_backend(procpid);

-- vacuum command

VACUUM (VERBOSE, ANALYZE);

-- all database users

select * from pg_user;

-- all databases and their sizes -- all tables and their size, with/without indexes

select datname, pg_size_pretty(pg_database_size(datname))
from pg_database
order by pg_database_size(datname) desc;

-- cache hit rates (should not be less than 0.99)

-- Heap cache hit ratio (should not be less than 0.99).
-- fixed: (hit - read) / hit used integer division (truncates to 0/1) and
-- divides by zero after a stats reset; use hits/(hits+reads) in numeric.
SELECT sum(heap_blks_read) as heap_read,
       sum(heap_blks_hit)  as heap_hit,
       sum(heap_blks_hit)::numeric / nullif(sum(heap_blks_hit) + sum(heap_blks_read), 0) as ratio
FROM pg_statio_user_tables;

-- table index usage rates (should not be less than 0.99)

-- Per-table index usage rate (should not be less than 99%).
-- fixed: NULLIF guards division by zero for tables never scanned at all.
SELECT relname,
       100 * idx_scan / nullif(seq_scan + idx_scan, 0) AS percent_of_times_index_used,
       n_live_tup AS rows_in_table
FROM pg_stat_user_tables 
ORDER BY n_live_tup DESC;

-- how many indexes are in cache

-- Index cache hit ratio.
-- fixed: integer division truncated the ratio and a zero hit count raised
-- division by zero; use hits/(hits+reads) in numeric with a NULLIF guard.
SELECT sum(idx_blks_read) as idx_read,
       sum(idx_blks_hit)  as idx_hit,
       sum(idx_blks_hit)::numeric / nullif(sum(idx_blks_hit) + sum(idx_blks_read), 0) as ratio
FROM pg_statio_user_indexes;

-- Dump database on remote host to file

$ pg_dump -U username -h hostname databasename > dump.sql

-- Import dump into existing database

$ psql -d newdb -f dump.sql

--


select                         
      sat.relname
      ,to_char(sat.last_autovacuum, 'YYYY-MM-DD HH24:MI:SS') last_autovacuum
      ,to_char(sat.last_vacuum, 'YYYY-MM-DD HH24:MI:SS') last_vacuum
     -- ,to_char(sat.last_autoanalyze,'YYYY-MM-DD HH24:MI:SS') last_utoanalyze
     -- ,to_char(sat.last_analyze,'YYYY-MM-DD HH24:MI:SS') last_analyze
      ,sat.seq_scan
      ,sat.idx_scan
      ,sat.n_dead_tup
      ,sat.n_live_tup  
  from pg_stat_all_tables sat
 where sat.n_dead_tup != 0
order by sat.n_dead_tup;

--


-- Which tables should autovacuum pick up next: computes the trigger threshold
-- (autovacuum_vacuum_threshold + scale_factor * reltuples) per table and
-- flags with '*' the ones whose dead-tuple count already exceeds it.
-- NOTE(review): uses the GLOBAL settings; per-table reloptions overrides are
-- not taken into account here.
SELECT psut.relname,
     to_char(psut.last_vacuum, 'YYYY-MM-DD HH24:MI') as last_vacuum,
     to_char(psut.last_autovacuum, 'YYYY-MM-DD HH24:MI') as last_autovacuum,
     to_char(pg_class.reltuples, '9G999G999G999') AS n_tup,
     to_char(psut.n_dead_tup, '9G999G999G999') AS dead_tup,
     to_char(CAST(current_setting('autovacuum_vacuum_threshold') AS bigint)
         + (CAST(current_setting('autovacuum_vacuum_scale_factor') AS numeric)
            * pg_class.reltuples), '9G999G999G999') AS av_threshold,
     CASE
         WHEN CAST(current_setting('autovacuum_vacuum_threshold') AS bigint)
             + (CAST(current_setting('autovacuum_vacuum_scale_factor') AS numeric)
                * pg_class.reltuples) < psut.n_dead_tup
         THEN '*'
         ELSE ''
     END AS expect_av
 FROM pg_stat_user_tables psut
     JOIN pg_class on psut.relid = pg_class.oid
 ORDER BY 1;

-- https://dba.stackexchange.com/questions/21068/aggressive-autovacuum-on-postgresql/35234#35234


cannot be dropped because some objects depend on it

select * from pg_database where datdba in (select usesysid from pg_user where usename = 'your_user_name') ;
select * from pg_class where relowner in (select usesysid from pg_user where usename = 'your_user_name'); 

select   *  
  from   pg_shdepend ; 

https://www.postgresql.org/docs/9.0/static/catalog-pg-shdepend.html

https://www.postgresql.org/docs/9.6/static/functions-admin.html#FUNCTIONS-REPLICATION


-- Check toast files of some table


-- List each ordinary table (relkind 'r') alongside its TOAST table
-- (relkind 't'), with the TOAST table's page and tuple estimates.
select t1.oid
      ,t1.relname
      ,t1.relkind
      ,t2.relname
      ,t2.relkind
      ,t2.relpages
      ,t2.reltuples
from  pg_class t1
inner join pg_class t2
on t1.reltoastrelid = t2.oid
where t1.relkind = 'r'
  and t2.relkind = 't';

-- Disk Usage


https://wiki.postgresql.org/wiki/Disk_Usage https://stackoverflow.com/questions/13304572/how-can-pg-column-size-be-smaller-than-octet-length


-- Repack


https://www.depesz.com/2013/06/21/bloat-removal-by-tuples-moving/ https://github.com/grayhemp/pgtoolkit https://pgxn.org/dist/pg_repack/1.1.8/doc/pg_repack.html http://reorg.github.io/pg_repack/ https://postgrespro.com/docs/postgresproee/9.6/app-pgrepack.html http://travellingsysop.net/postgresql/2016/03/bloat-origins-monitoring-and-managing.html

http://www.rummandba.com/2013/12/postgresql-online-space-reclaim-using_3.html

https://www.depesz.com/2013/06/21/bloat-removal-by-tuples-moving/

https://github.com/reorg/pg_repack/blob/master/regress/expected/repack.out https://bucardo.org/check_postgres/check_postgres.pl.html


-- Object size:


Check size of tables and objects in PostgreSQL database Skip to end of metadata

Created by Rinn Bernd (ID SIS), last modified on Oct 11, 2011

Go to start of metadata

To get an overview about how much space is taken by what database, call:

SELECT
    pg_database.datname,
    pg_size_pretty(pg_database_size(pg_database.datname)) AS size
    FROM pg_database;

To get more details, call:
SELECT
   relname as "Table",
   pg_size_pretty(pg_total_relation_size(relid)) As "Size",
   pg_size_pretty(pg_total_relation_size(relid) - pg_relation_size(relid)) as "External Size"
   FROM pg_catalog.pg_statio_user_tables ORDER BY pg_total_relation_size(relid) DESC;

It will show 3 columns:

Table – The name of the table
Size – The total size that this table takes
External Size – The size that related objects of this table like indices take

If you want to dig in deeper and see the size of all objects (at least of 64kB):

SELECT
   relname AS objectname,
   relkind AS objecttype,
   reltuples AS "#entries", pg_size_pretty(relpages::bigint*8*1024) AS size
   FROM pg_class
   WHERE relpages >= 8
   ORDER BY relpages DESC;

This will show 4 columns:

objectname – The name of the object
objecttype – r for the table, i for an index, t for toast data, ...
#entries – The number of entries in the object (e.g. rows)
size – The size of the object

What are 'toast data'? Icon

From the documentation: PostgreSQL uses a fixed page size (commonly 8 kB), and does not allow tuples to span multiple pages. Therefore, it is not possible to store very large field values directly. To overcome this limitation, large field values are compressed and/or broken up into multiple physical rows. This happens transparently to the user, with only small impact on most of the backend code. The technique is affectionately known as TOAST (or "the best thing since sliced bread"). [...]

https://wiki-bsse.ethz.ch/display/ITDOC/Check+size+of+tables+and+objects+in+PostgreSQL+database


** Find commonly accessed tables and their use of indexes:

SELECT relname,seq_tup_read,idx_tup_fetch,cast(idx_tup_fetch AS numeric) / (idx_tup_fetch + seq_tup_read) AS idx_tup_pct FROM pg_stat_user_tables WHERE (idx_tup_fetch + seq_tup_read)>0 ORDER BY idx_tup_pct;

Returns output like:

   relname        | seq_tup_read | idx_tup_fetch |      idx_tup_pct       

----------------------+--------------+---------------+------------------------ schema_migrations | 817 | 0 | 0.00000000000000000000 user_device_photos | 349 | 0 | 0.00000000000000000000 albums | 530701 | 379 | 0.00071364012954733750 facebook_oauths | 15250 | 36 | 0.00235509616642679576

Analysis: For each row, a low "idx_tup_pct" means that essentially no indexes are being used. In the case of "facebook_oauths" it turns out we are commonly running a query like "SELECT * FROM facebook_oauths WHERE fb_user_id = X", and there isn't an index on "fb_user_id"

===============================================================================

** Find the INSERT/UPDATE/DELETE statistics for tables:

-- Share of inserts / updates / deletes in each table's total write traffic.
SELECT
    relname,
    cast(n_tup_ins AS numeric) / (n_tup_ins + n_tup_upd + n_tup_del) AS ins_pct,
    cast(n_tup_upd AS numeric) / (n_tup_ins + n_tup_upd + n_tup_del) AS upd_pct,
    cast(n_tup_del AS numeric) / (n_tup_ins + n_tup_upd + n_tup_del) AS del_pct
FROM pg_stat_user_tables
WHERE (n_tup_ins + n_tup_upd + n_tup_del) > 0
ORDER BY relname;

================================================================================

** Table I/O

-- Buffer-cache hit ratio per table; worst-cached tables first.
SELECT
    relname,
    cast(heap_blks_hit AS numeric) / (heap_blks_hit + heap_blks_read) AS hit_pct,
    heap_blks_hit,
    heap_blks_read
FROM pg_statio_user_tables
WHERE (heap_blks_hit + heap_blks_read) > 0
ORDER BY hit_pct;

'heap_blks_hit' = the number of blocks that were satisfied from the page cache 'heap_blks_read' = the number of blocks that had to hit disk/IO layer for reads

When 'heap_blks_hit' is significantly greater than 'heap_blks_read', it means we have a well-cached DB and most of the queries can be satisfied from the cache

================================================================================

** Table & Index sizes

-- Table & index sizes in 'public' plus index usage counters, biggest index first.
-- Fixes vs. the widely copy/pasted version:
--   * pg_class is matched on (relname, schema) via pg_namespace, not on
--     relname alone, so same-named tables in other schemas no longer cross-match;
--   * the index subquery is joined by table OID instead of table name,
--     for the same reason;
--   * sizes are computed from OIDs rather than quote_ident(name)::text,
--     which raised an error for tables outside the current search_path.
SELECT
    t.tablename,
    foo.indexname,
    c.reltuples::integer AS num_rows,
    pg_size_pretty(pg_relation_size(c.oid)) AS table_size,
    pg_size_pretty(foo.index_bytes) AS index_size,
    CASE WHEN x.is_unique = 1 THEN 'Y'
       ELSE 'N'
    END AS UNIQUE,
    foo.idx_scan AS number_of_scans,
    foo.idx_tup_read AS tuples_read,
    foo.idx_tup_fetch AS tuples_fetched
FROM pg_tables t
LEFT OUTER JOIN pg_namespace n
    ON n.nspname = t.schemaname
LEFT OUTER JOIN pg_class c
    ON c.relname = t.tablename
   AND c.relnamespace = n.oid
LEFT OUTER JOIN
       (SELECT indrelid,
           max(CAST(indisunique AS integer)) AS is_unique
       FROM pg_index
       GROUP BY indrelid) x
       ON c.oid = x.indrelid
LEFT OUTER JOIN
    ( SELECT ix.indrelid,
             ipg.relname AS indexname,
             ix.indnatts AS number_of_columns,
             pg_relation_size(ix.indexrelid) AS index_bytes,
             psai.idx_scan,
             psai.idx_tup_read,
             psai.idx_tup_fetch
      FROM pg_index ix
           JOIN pg_class ipg ON ipg.oid = ix.indexrelid
           JOIN pg_stat_all_indexes psai ON ix.indexrelid = psai.indexrelid )
    AS foo
    ON c.oid = foo.indrelid
WHERE t.schemaname = 'public'
ORDER BY foo.index_bytes DESC NULLS LAST;

================================================================================

** Index Health

SELECT indexrelname,cast(idx_tup_read AS numeric) / idx_scan AS avg_tuples,idx_scan,idx_tup_read FROM pg_stat_user_indexes WHERE idx_scan > 0;

** Index Size

-- Size and scan count of non-unique indexes; least-used first
-- (candidates for dropping).
SELECT
    i.schemaname,
    i.relname,
    i.indexrelname,
    i.idx_scan,
    pg_size_pretty(pg_relation_size(i.indexrelid)) AS index_size
FROM pg_stat_user_indexes AS i
JOIN pg_index USING (indexrelid)
WHERE NOT indisunique
ORDER BY i.idx_scan, i.relname;

** Index I/O - Same idea as Table I/O above

-- Buffer-cache hit ratio per index (same idea as the table I/O check above).
SELECT
    indexrelname,
    cast(idx_blks_hit AS numeric) / (idx_blks_hit + idx_blks_read) AS hit_pct,
    idx_blks_hit,
    idx_blks_read
FROM pg_statio_user_indexes
WHERE (idx_blks_hit + idx_blks_read) > 0
ORDER BY hit_pct;

** Show sizes & usage of indexes that are not used very often: NOTE: we define 'usage' by # of times used, in this case we use '200' - change accordingly

-- Rarely used (< 200 scans) non-unique indexes with their sizes and the
-- write load on their tables.  Fixes vs. the widely copy/pasted version:
--   * pg_indexes is joined on (schemaname, indexname), and the table stats
--     are joined by relid (OID) — the original name-only joins produced
--     duplicate/cross-matched rows when same-named objects existed in
--     different schemas;
--   * the index size uses indexrelid directly instead of a name lookup.
SELECT
    idstat.relname AS table_name,
    idstat.indexrelname AS index_name,
    idstat.idx_scan AS times_used,
    pg_size_pretty(pg_relation_size(tabstat.relid)) AS table_size,
    pg_size_pretty(pg_relation_size(idstat.indexrelid)) AS index_size,
    tabstat.n_tup_upd + tabstat.n_tup_ins + tabstat.n_tup_del AS num_writes,
    pgi.indexdef AS definition
FROM pg_stat_user_indexes AS idstat
JOIN pg_indexes AS pgi
    ON pgi.schemaname = idstat.schemaname
   AND pgi.indexname = idstat.indexrelname
JOIN pg_stat_user_tables AS tabstat
    ON tabstat.relid = idstat.relid
WHERE idstat.idx_scan < 200
  AND pgi.indexdef !~* 'unique'
ORDER BY idstat.relname, idstat.indexrelname;

-- Dealing with raster / postgis imports / shp2pgsql / gdalinfo / raster2pgsql


https://gist.github.com/vinnix/fe0b7dd13358f24c45cfd6b0d017e353


-- Roles and databases with a non-default connection limit (-1 = unlimited).
SELECT rolname, rolconnlimit
FROM pg_roles
WHERE rolconnlimit <> -1;

SELECT datname, datconnlimit
FROM pg_database
WHERE datconnlimit <> -1;

https://www.postgresql.org/docs/current/catalog-pg-default-acl.html https://docs.timescale.com/latest/introduction


--


  • What columns reference large objects (pg_largeobject)
-- Columns of plain ('r') and materialized ('m') tables whose type is oid
-- or lo, i.e. candidates for referencing large objects (pg_largeobject).
-- System schemas (pg_*) are excluded, as are dropped and system columns.
-- Rewritten with explicit ANSI joins instead of the original comma joins,
-- so no join condition can silently go missing; result set is identical.
SELECT s.nspname, c.relname, a.attname
FROM pg_attribute a
JOIN pg_class c     ON c.oid = a.attrelid
JOIN pg_namespace s ON s.oid = c.relnamespace
JOIN pg_type t      ON t.oid = a.atttypid
WHERE a.attnum > 0
  AND NOT a.attisdropped
  AND t.typname IN ('oid', 'lo')
  AND c.relkind IN ('r', 'm')
  AND s.nspname !~ '^pg_';
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment