Evergreen ILS Index and Table Bloat Notes
--------------------------------------
-- acq.acq_lineitem_history
-- Cluster acq.acq_lineitem_history on primary key.
cluster verbose acq.acq_lineitem_history using acq_lineitem_history_pkey;
--Remove cluster marking
ALTER TABLE acq.acq_lineitem_history SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze acq.acq_lineitem_history;
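-------------------------------------------------
-- Note: CLUSTER marks the table so that a bare "CLUSTER;" would reorder it
-- again, which is why each block in these notes removes the marking after
-- clustering. To verify which index (if any) a table is still marked to
-- cluster on, pg_index can be checked; a minimal sketch (the table name is
-- just an example):
SELECT c.relname AS clustered_index
FROM pg_index i
JOIN pg_class c ON c.oid = i.indexrelid
WHERE i.indrelid = 'acq.acq_lineitem_history'::regclass
  AND i.indisclustered;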
-- Action schema: reduce table and index bloat
-- action.hold_notification
-- Cluster action.hold_notification on primary key.
cluster verbose action.hold_notification using hold_notification_pkey;
--Remove cluster marking
ALTER TABLE action.hold_notification SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.hold_notification;
-----------------------
-- action.archive_actor_stat_cat
-- Cluster action.archive_actor_stat_cat on primary key.
cluster verbose action.archive_actor_stat_cat using archive_actor_stat_cat_pkey;
--Remove cluster marking
ALTER TABLE action.archive_actor_stat_cat SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.archive_actor_stat_cat;
------------------------------
-- action.archive_asset_stat_cat
cluster verbose action.archive_asset_stat_cat using archive_asset_stat_cat_pkey;
--Remove cluster marking
ALTER TABLE action.archive_asset_stat_cat SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.archive_asset_stat_cat;
--------------------
-- action.transit_copy
cluster verbose action.transit_copy using active_transit_cp_idx;
--Remove cluster marking
ALTER TABLE action.transit_copy SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.transit_copy;
--------------------
-- action.hold_transit_copy
cluster verbose action.hold_transit_copy using active_hold_transit_cp_idx;
--Remove cluster marking
ALTER TABLE action.hold_transit_copy SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.hold_transit_copy;
--------------------
-- action.usr_circ_history
-- Create an index for clustering
CREATE INDEX action_usr_circ_history_cluster_idx ON action.usr_circ_history USING btree (usr,id);
-- Cluster
cluster verbose action.usr_circ_history using action_usr_circ_history_cluster_idx;
--Drop cluster index
drop index action.action_usr_circ_history_cluster_idx;
--Remove cluster marking
ALTER TABLE action.usr_circ_history SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.usr_circ_history;
-----------------------------
-- action.hold_copy_map
cluster verbose action.hold_copy_map using copy_once_per_hold;
--Remove cluster marking
ALTER TABLE action.hold_copy_map SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.hold_copy_map;
-- Alter autovacuum for action.hold_copy_map so it only takes 500 deletes to trigger
ALTER TABLE action.hold_copy_map SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE action.hold_copy_map SET (autovacuum_vacuum_threshold = 500);
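-------------------------------------------------
-- Autovacuum triggers when dead tuples exceed
-- autovacuum_vacuum_threshold + autovacuum_vacuum_scale_factor * reltuples,
-- so zeroing the scale factor turns the threshold into an absolute row count.
-- The overrides can be read back from pg_class.reloptions to confirm they
-- took effect; a sketch (the table name is just an example):
SELECT relname, reloptions
FROM pg_class
WHERE oid = 'action.hold_copy_map'::regclass;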
----------------------------------------
-- Action.circulation bloat reduction
-- Set fill factor to 100% before clustering; pack old rows in tightly, since they probably won't be updated much.
alter table action.circulation SET ( fillfactor=100);
-- Cluster action.circulation on primary key
cluster verbose action.circulation using circulation_pkey;
-- Alter fill factor for future pages, to reduce fragmentation because of updates.
alter table action.circulation SET ( fillfactor=70);
--Remove cluster marking
ALTER TABLE action.circulation SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action.circulation;
------
--- Autovacuum settings for action.circulation - try to trigger vacuum twice a day, to allow HOT updates for checkins/renewals
-- Approx 8000 circs a day, so set to 4000
-- Alter autovacuum for Action.circulation so it only takes 4000 updates to trigger
ALTER TABLE action.circulation SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE action.circulation SET (autovacuum_vacuum_threshold = 4000);
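------
-- Whether the lowered fill factor is actually buying HOT updates can be
-- watched in pg_stat_user_tables (n_tup_hot_upd vs. n_tup_upd); a sketch:
SELECT schemaname, relname, n_tup_upd, n_tup_hot_upd,
       round(100.0 * n_tup_hot_upd / nullif(n_tup_upd, 0), 1) AS hot_pct
FROM pg_stat_user_tables
WHERE schemaname = 'action' AND relname = 'circulation';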
---------------------------
--- action.hold_request
-- Set fill factor to 100% before clustering; pack old rows in tightly, since they probably won't be updated as much.
alter table action.hold_request SET ( fillfactor=100);
-- Create index to cluster on, place fulfilled/canceled holds at the beginning, then order by hold id.
create index hold_request_cluster_idx on action.hold_request using btree((fulfillment_time IS NULL
and cancel_time IS NULL)
, (case WHEN (fulfillment_time IS not NULL OR cancel_time IS NOT null ) then usr end)
, (id));
-- Cluster on custom index
cluster verbose action.hold_request using hold_request_cluster_idx;
--Remove cluster marking
ALTER TABLE action.hold_request SET WITHOUT CLUSTER;
-- Drop cluster index
drop index action.hold_request_cluster_idx;
-- Alter fill factor for future pages, to reduce fragmentation because of updates.
alter table action.hold_request SET ( fillfactor=80 );
--Vacuum Analyze
vacuum verbose analyze action.hold_request;
-- Autovacuum - set to 4000 updates before trigger, should run about twice a day.
-- Alter autovacuum for action.hold_request so it only takes 4000 updates to trigger
ALTER TABLE action.hold_request SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE action.hold_request SET (autovacuum_vacuum_threshold = 4000);
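---------------------------
-- To spot-check that the cluster really packed fulfilled/canceled holds
-- first, the physical row order can be sampled via the ctid system column;
-- a sketch:
SELECT ctid, id, fulfillment_time, cancel_time
FROM action.hold_request
ORDER BY ctid
LIMIT 10;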
-- Action_Trigger Schema Bloat Cleanup
-- cluster action_trigger.event_output
cluster verbose action_trigger.event_output using event_output_pkey;
--Remove cluster marking
ALTER TABLE action_trigger.event_output SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze action_trigger.event_output;
---------------------------
-- action_trigger.event
-- Set fill factor to 100% before clustering; pack old rows in tightly, since they probably won't be updated much.
alter table action_trigger.event SET ( fillfactor=100);
-- Cluster action_trigger.event on primary key
cluster verbose action_trigger.event using event_pkey;
-- Alter fill factor for future pages, to reduce fragmentation because of updates.
alter table action_trigger.event SET ( fillfactor=70);
vacuum verbose analyze action_trigger.event;
-- Alter autovacuum for action_trigger.event so it only takes 250 updates to trigger
ALTER TABLE action_trigger.event SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE action_trigger.event SET (autovacuum_vacuum_threshold = 250);
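------------------------------
-- The thresholds above assume a known daily churn rate; one way to gauge it
-- is to snapshot the pg_stat_user_tables counters and diff them a day later;
-- a sketch:
SELECT relname, n_tup_ins, n_tup_upd, n_tup_del, last_autovacuum
FROM pg_stat_user_tables
WHERE schemaname = 'action_trigger' AND relname = 'event';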
------------------------------
-- actor.usr_message
-- Cluster actor.usr_message on primary key.
cluster verbose actor.usr_message using usr_message_pkey;
--Remove cluster marking
ALTER TABLE actor.usr_message SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze actor.usr_message;
-------
-- asset.copy
-- Set fill factor to 100% before clustering; pack old rows in tightly, since they probably won't be updated as much.
alter table asset.copy SET ( fillfactor=100);
-- Create an index to cluster on that places deleted copies first (by id), then non-deleted copies.
-- Deleted copies are packed at the beginning since they are accessed less often.
CREATE INDEX cp_cluster ON asset.copy USING btree ((not deleted),id);
-- Cluster on custom index
cluster verbose asset.copy using cp_cluster;
-- Drop custom index
drop index asset.cp_cluster;
-- Alter table to remove cluster mark
ALTER TABLE asset.copy SET WITHOUT CLUSTER;
-- Set fill factor to 80% after cluster, so new pages have room for updates to stay in the same page.
alter table asset.copy SET ( fillfactor=80);
-- Vacuum analyze
vacuum verbose analyze asset.copy;
-- Autovacuum - set to 9000 updates before trigger, should run about twice a day.
-- Alter autovacuum for asset.copy so it only takes 9000 updates to trigger
ALTER TABLE asset.copy SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE asset.copy SET (autovacuum_vacuum_threshold = 9000);
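-------------------------------------------------
-- Note on the (not deleted) index expression: btree sorts false before true,
-- so rows where "not deleted" is false (i.e. deleted copies) land first.
-- A quick demonstration of the sort order:
SELECT b FROM (VALUES (true), (false)) AS v(b) ORDER BY b;
-- returns false, then true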
-------------------------------------------------
-- asset.call_number
-- Create an index to cluster on that places deleted call numbers first, then non-deleted ones,
-- sorted by record id so all call numbers for one record land in the same pages, then by id.
-- Deleted call numbers are packed at the beginning since they are accessed less often.
CREATE INDEX cp_cn_cluster ON asset.call_number USING btree ((not deleted),record,id);
-- Cluster on custom index
cluster verbose asset.call_number using cp_cn_cluster;
-- Drop custom index
drop index asset.cp_cn_cluster;
-- Alter table to remove cluster mark
ALTER TABLE asset.call_number SET WITHOUT CLUSTER;
-- Vacuum analyze
vacuum verbose analyze asset.call_number;
-------------------------------------------------
-- cluster asset.opac_visible_copies
-- Fillfactor shouldn't matter because there are only inserts and deletes.
cluster verbose asset.opac_visible_copies using opac_visible_copies_idx1;
--Remove cluster marking
ALTER TABLE asset.opac_visible_copies SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze asset.opac_visible_copies;
-- Alter autovacuum for asset.opac_visible_copies so it only takes 2000 deletes to trigger
ALTER TABLE asset.opac_visible_copies SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE asset.opac_visible_copies SET (autovacuum_vacuum_threshold = 2000);
-- Auditor schema: bloat cleanup and clustering
--------------------------------
-- cluster auditor.asset_copy_history
-- Create index to cluster on
create index aud_asset_cp_cluster on auditor.asset_copy_history using btree(id,audit_id);
cluster verbose auditor.asset_copy_history using aud_asset_cp_cluster;
--remove cluster index
drop index auditor.aud_asset_cp_cluster;
--Remove cluster marking
ALTER TABLE auditor.asset_copy_history SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze auditor.asset_copy_history;
--------------------------------------
--auditor.acq_invoice_history
-- Create index to cluster on
create index aud_asset_aud_inv_cluster on auditor.acq_invoice_history using btree(id,audit_id);
cluster verbose auditor.acq_invoice_history using aud_asset_aud_inv_cluster;
--remove cluster index
drop index auditor.aud_asset_aud_inv_cluster;
--Remove cluster marking
ALTER TABLE auditor.acq_invoice_history SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze auditor.acq_invoice_history;
-- authority bloat cleanup
-- authority.full_rec
-- Cluster authority.full_rec on record index, so all entries for a specific record are always together.
cluster verbose authority.full_rec using authority_full_rec_record_idx;
--Remove cluster marking
ALTER TABLE authority.full_rec SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze authority.full_rec;
-- authority.simple_heading
-- Cluster authority.simple_heading on record index, so all entries for a specific record are always together.
cluster verbose authority.simple_heading using authority_simple_heading_record_idx;
--Remove cluster marking
ALTER TABLE authority.simple_heading SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze authority.simple_heading;
-- authority.record_entry
-- Cluster authority.record_entry on primary key.
cluster verbose authority.record_entry using record_entry_pkey;
--Remove cluster marking
ALTER TABLE authority.record_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze authority.record_entry;
-- authority.bib_linking
-- Cluster authority.bib_linking on bib index, so all entries for a specific record are always together.
cluster verbose authority.bib_linking using authority_bl_bib_idx;
--Remove cluster marking
ALTER TABLE authority.bib_linking SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze authority.bib_linking;
-- Biblio Schema Bloat Cleanup
-------------------------
-- biblio.record_entry
-- Create an index to cluster biblio.record_entry (bre): deleted bibs first, then by id
CREATE INDEX bre_cluster ON biblio.record_entry USING btree ((not deleted),id);
-- Cluster on bre_cluster index, to pack deleted bibs at the beginning, then non deleted bibs by id.
cluster verbose biblio.record_entry using bre_cluster;
-- drop index
drop index biblio.bre_cluster;
--alter table to remove cluster
ALTER TABLE biblio.record_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze biblio.record_entry;
-- Container schema bloat reduction
-- container.copy_bucket_item
-- Create index to cluster on
create index copy_bucket_item_bucket_id_idx on container.copy_bucket_item using btree(bucket,id);
-- Cluster container.copy_bucket_item on the bucket/id index.
cluster verbose container.copy_bucket_item using copy_bucket_item_bucket_id_idx;
--remove cluster index
drop index container.copy_bucket_item_bucket_id_idx;
--Remove cluster marking
ALTER TABLE container.copy_bucket_item SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze container.copy_bucket_item;
-- new table bloat query
-- still needs work; is often off by +/- 20%
WITH constants AS (
-- define some constants for sizes of things
-- for reference down the query and easy maintenance
SELECT current_setting('block_size')::numeric AS bs, 23 AS hdr, 8 AS ma
),
no_stats AS (
-- screen out tables that have attributes
-- which don't have stats, such as JSON
SELECT table_schema, table_name,
n_live_tup::numeric as est_rows,
pg_table_size(relid)::numeric as table_size
FROM information_schema.columns
JOIN pg_stat_user_tables as psut
ON table_schema = psut.schemaname
AND table_name = psut.relname
LEFT OUTER JOIN pg_stats
ON table_schema = pg_stats.schemaname
AND table_name = pg_stats.tablename
AND column_name = attname
WHERE attname IS NULL
AND table_schema NOT IN ('pg_catalog', 'information_schema')
GROUP BY table_schema, table_name, relid, n_live_tup
),
null_headers AS (
-- calculate null header sizes
-- omitting tables which don't have complete stats
-- and attributes which aren't visible
SELECT
hdr+1+(sum(case when null_frac <> 0 THEN 1 else 0 END)/8) as nullhdr,
SUM((1-null_frac)*avg_width) as datawidth,
MAX(null_frac) as maxfracsum,
schemaname,
tablename,
hdr, ma, bs
FROM pg_stats CROSS JOIN constants
LEFT OUTER JOIN no_stats
ON schemaname = no_stats.table_schema
AND tablename = no_stats.table_name
WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
AND no_stats.table_name IS NULL
AND EXISTS ( SELECT 1
FROM information_schema.columns
WHERE schemaname = columns.table_schema
AND tablename = columns.table_name )
GROUP BY schemaname, tablename, hdr, ma, bs
),
data_headers AS (
-- estimate header and row size
SELECT
ma, bs, hdr, schemaname, tablename,
(datawidth+(hdr+ma-(case when hdr%ma=0 THEN ma ELSE hdr%ma END)))::numeric AS datahdr,
(maxfracsum*(nullhdr+ma-(case when nullhdr%ma=0 THEN ma ELSE nullhdr%ma END))) AS nullhdr2
FROM null_headers
),
table_estimates AS (
-- make estimates of how large the table should be
-- based on row and page size
SELECT schemaname, tablename, bs,
reltuples::numeric as est_rows, relpages * bs as table_bytes,
CEIL((reltuples*
(datahdr + nullhdr2 + 4 + ma -
(CASE WHEN datahdr%ma=0
THEN ma ELSE datahdr%ma END)
)/(bs-20))) * bs AS expected_bytes,
reltoastrelid
FROM data_headers
JOIN pg_class ON tablename = relname
JOIN pg_namespace ON relnamespace = pg_namespace.oid
AND schemaname = nspname
WHERE pg_class.relkind = 'r'
),
estimates_with_toast AS (
-- add in estimated TOAST table sizes
-- estimate based on 4 toast tuples per page because we don't have
-- anything better. also append the no_data tables
SELECT schemaname, tablename,
TRUE as can_estimate,
est_rows,
table_bytes + ( coalesce(toast.relpages, 0) * bs ) as table_bytes,
expected_bytes + ( ceil( coalesce(toast.reltuples, 0) / 4 ) * bs ) as expected_bytes
FROM table_estimates LEFT OUTER JOIN pg_class as toast
ON table_estimates.reltoastrelid = toast.oid
AND toast.relkind = 't'
),
table_estimates_plus AS (
-- add some extra metadata to the table data
-- and calculations to be reused
-- including whether we can't estimate it
-- or whether we think it might be compressed
SELECT current_database() as databasename,
schemaname, tablename, can_estimate,
est_rows,
CASE WHEN table_bytes > 0
THEN table_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS table_bytes,
CASE WHEN expected_bytes > 0
THEN expected_bytes::NUMERIC
ELSE NULL::NUMERIC END
AS expected_bytes,
CASE WHEN expected_bytes > 0 AND table_bytes > 0
AND expected_bytes <= table_bytes
THEN (table_bytes - expected_bytes)::NUMERIC
ELSE 0::NUMERIC END AS bloat_bytes
FROM estimates_with_toast
UNION ALL
SELECT current_database() as databasename,
table_schema, table_name, FALSE,
est_rows, table_size,
NULL::NUMERIC, NULL::NUMERIC
FROM no_stats
),
bloat_data AS (
-- do final math calculations and formatting
select current_database() as databasename,
schemaname, tablename, can_estimate,
table_bytes, round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
expected_bytes, round(expected_bytes/(1024^2)::NUMERIC,3) as expected_mb,
round(bloat_bytes*100/table_bytes) as pct_bloat,
round(bloat_bytes/(1024::NUMERIC^2),2) as mb_bloat,
est_rows
FROM table_estimates_plus
)
-- filter output for bloated tables
SELECT databasename, schemaname, tablename,
can_estimate,
est_rows,
pct_bloat, mb_bloat,
table_mb
FROM bloat_data
-- this where clause defines which tables actually appear
-- in the bloat chart
-- example below filters for tables which are at least 25%
-- bloated and carrying more than 5 MB of bloat
WHERE ( pct_bloat >= 25 AND mb_bloat >= 5 )
ORDER BY pct_bloat DESC;
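-------------------------------------------------
-- As a cross-check on the estimate above, the contrib pgstattuple extension
-- measures dead space exactly, at the cost of a full table scan; a sketch,
-- assuming the extension is available (the table name is just an example):
CREATE EXTENSION IF NOT EXISTS pgstattuple;
SELECT * FROM pgstattuple('action.circulation');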
-- btree index stats query
-- estimates bloat for btree indexes
WITH btree_index_atts AS (
SELECT nspname,
indexclass.relname as index_name,
indexclass.reltuples,
indexclass.relpages,
indrelid, indexrelid,
indexclass.relam,
tableclass.relname as tablename,
regexp_split_to_table(indkey::text, ' ')::smallint AS attnum,
indexrelid as index_oid
FROM pg_index
JOIN pg_class AS indexclass ON pg_index.indexrelid = indexclass.oid
JOIN pg_class AS tableclass ON pg_index.indrelid = tableclass.oid
JOIN pg_namespace ON pg_namespace.oid = indexclass.relnamespace
JOIN pg_am ON indexclass.relam = pg_am.oid
WHERE pg_am.amname = 'btree' and indexclass.relpages > 0
AND nspname NOT IN ('pg_catalog','information_schema')
),
index_item_sizes AS (
SELECT
ind_atts.nspname, ind_atts.index_name,
ind_atts.reltuples, ind_atts.relpages, ind_atts.relam,
indrelid AS table_oid, index_oid,
current_setting('block_size')::numeric AS bs,
8 AS maxalign,
24 AS pagehdr,
CASE WHEN max(coalesce(pg_stats.null_frac,0)) = 0
THEN 2
ELSE 6
END AS index_tuple_hdr,
sum( (1-coalesce(pg_stats.null_frac, 0)) * coalesce(pg_stats.avg_width, 1024) ) AS nulldatawidth
FROM pg_attribute
JOIN btree_index_atts AS ind_atts ON pg_attribute.attrelid = ind_atts.indexrelid AND pg_attribute.attnum = ind_atts.attnum
JOIN pg_stats ON pg_stats.schemaname = ind_atts.nspname
-- stats for regular index columns
AND ( (pg_stats.tablename = ind_atts.tablename AND pg_stats.attname = pg_catalog.pg_get_indexdef(pg_attribute.attrelid, pg_attribute.attnum, TRUE))
-- stats for functional indexes
OR (pg_stats.tablename = ind_atts.index_name AND pg_stats.attname = pg_attribute.attname))
WHERE pg_attribute.attnum > 0
GROUP BY 1, 2, 3, 4, 5, 6, 7, 8, 9
),
index_aligned_est AS (
SELECT maxalign, bs, nspname, index_name, reltuples,
relpages, relam, table_oid, index_oid,
coalesce (
ceil (
reltuples * ( 6
+ maxalign
- CASE
WHEN index_tuple_hdr%maxalign = 0 THEN maxalign
ELSE index_tuple_hdr%maxalign
END
+ nulldatawidth
+ maxalign
- CASE /* Add padding to the data to align on MAXALIGN */
WHEN nulldatawidth::integer%maxalign = 0 THEN maxalign
ELSE nulldatawidth::integer%maxalign
END
)::numeric
/ ( bs - pagehdr::NUMERIC )
+1 )
, 0 )
as expected
FROM index_item_sizes
),
raw_bloat AS (
SELECT current_database() as dbname, nspname, pg_class.relname AS table_name, index_name,
bs*(index_aligned_est.relpages)::bigint AS totalbytes, expected,
CASE
WHEN index_aligned_est.relpages <= expected
THEN 0
ELSE bs*(index_aligned_est.relpages-expected)::bigint
END AS wastedbytes,
CASE
WHEN index_aligned_est.relpages <= expected
THEN 0
ELSE bs*(index_aligned_est.relpages-expected)::bigint * 100 / (bs*(index_aligned_est.relpages)::bigint)
END AS realbloat,
pg_relation_size(index_aligned_est.table_oid) as table_bytes,
stat.idx_scan as index_scans
FROM index_aligned_est
JOIN pg_class ON pg_class.oid=index_aligned_est.table_oid
JOIN pg_stat_user_indexes AS stat ON index_aligned_est.index_oid = stat.indexrelid
),
format_bloat AS (
SELECT dbname as database_name, nspname as schema_name, table_name, index_name,
round(realbloat) as bloat_pct, round(wastedbytes/(1024^2)::NUMERIC) as bloat_mb,
round(totalbytes/(1024^2)::NUMERIC,3) as index_mb,
round(table_bytes/(1024^2)::NUMERIC,3) as table_mb,
index_scans
FROM raw_bloat
)
-- final query outputting the bloated indexes
-- change the where and order by to change
-- what shows up as bloated
SELECT *
FROM format_bloat
WHERE ( bloat_pct > 30 and bloat_mb > 5 )
ORDER BY bloat_mb DESC;
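-------------------------------------------------
-- One way to reclaim space from an index this query flags, without the
-- exclusive lock a plain REINDEX takes, is to build a replacement
-- concurrently and swap it in; a sketch with hypothetical index names
-- (note: CREATE INDEX CONCURRENTLY cannot run inside a transaction block):
CREATE INDEX CONCURRENTLY cp_circ_lib_idx_new ON asset.copy USING btree (circ_lib);
DROP INDEX CONCURRENTLY asset.cp_circ_lib_idx;
ALTER INDEX asset.cp_circ_lib_idx_new RENAME TO cp_circ_lib_idx;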
-- metabib.real_full_rec
-- Cluster real_full_rec on record index, so all entries for a specific record are together.
cluster verbose metabib.real_full_rec using metabib_full_rec_record_idx;
--Remove cluster marking
ALTER TABLE metabib.real_full_rec SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.real_full_rec;
-- Autovacuum - set to 10000 updates before trigger, should run about twice a day.
-- Alter autovacuum for metabib.real_full_rec so it only takes 10000 updates to trigger
ALTER TABLE metabib.real_full_rec SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE metabib.real_full_rec SET (autovacuum_vacuum_threshold = 10000);
------------------------------------
-- Metabib schema bloat updates
-- cluster metabib.uncontrolled_record_attr_value
cluster verbose metabib.uncontrolled_record_attr_value using uncontrolled_record_attr_value_pkey;
--Remove cluster marking
ALTER TABLE metabib.uncontrolled_record_attr_value SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.uncontrolled_record_attr_value;
----------------------
-- metabib.keyword_field_entry
cluster verbose metabib.keyword_field_entry using metabib_keyword_field_entry_source_idx ;
--Remove cluster marking
ALTER TABLE metabib.keyword_field_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.keyword_field_entry;
----------------------
-- metabib.combined_keyword_field_entry
cluster verbose metabib.combined_keyword_field_entry using metabib_combined_keyword_field_entry_fakepk_idx ;
--Remove cluster marking
ALTER TABLE metabib.combined_keyword_field_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.combined_keyword_field_entry;
-----------------------
-- metabib.combined_subject_field_entry
cluster verbose metabib.combined_subject_field_entry using metabib_combined_subject_field_entry_fakepk_idx ;
--Remove cluster marking
ALTER TABLE metabib.combined_subject_field_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.combined_subject_field_entry;
--------------------------
-- metabib.combined_series_field_entry
cluster verbose metabib.combined_series_field_entry using metabib_combined_series_field_entry_fakepk_idx ;
--Remove cluster marking
ALTER TABLE metabib.combined_series_field_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.combined_series_field_entry;
------------------------------
-- metabib.browse_entry_simple_heading_map
-- Cluster metabib.browse_entry_simple_heading_map on its entry index.
cluster verbose metabib.browse_entry_simple_heading_map using browse_entry_sh_map_entry_idx;
--Remove cluster marking
ALTER TABLE metabib.browse_entry_simple_heading_map SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.browse_entry_simple_heading_map;
----------------------
-- metabib.combined_author_field_entry
-- Cluster metabib.combined_author_field_entry on primary key
cluster verbose metabib.combined_author_field_entry using metabib_combined_author_field_entry_fakepk_idx;
--Remove cluster marking
ALTER TABLE metabib.combined_author_field_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.combined_author_field_entry;
---------------------
-- metabib.combined_title_field_entry
-- Cluster metabib.combined_title_field_entry on primary key
cluster verbose metabib.combined_title_field_entry using metabib_combined_title_field_entry_fakepk_idx;
--Remove cluster marking
ALTER TABLE metabib.combined_title_field_entry SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze metabib.combined_title_field_entry;
-- Money schema: reduce table and index bloat
-- money.billing
-- Cluster money.billing on primary key.
cluster verbose money.billing using billing_pkey;
--Remove cluster marking
ALTER TABLE money.billing SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze money.billing;
----------------------
-- money.materialized_billable_xact_summary
-- Autovacuum - set to 10000 updates before trigger, should run about twice a day.
-- Alter autovacuum for money.materialized_billable_xact_summary so it only takes 10000 updates to trigger
ALTER TABLE money.materialized_billable_xact_summary SET (autovacuum_vacuum_scale_factor = 0.0);
ALTER TABLE money.materialized_billable_xact_summary SET (autovacuum_vacuum_threshold = 10000);
-------------
-- money.grocery
cluster verbose money.grocery using grocery_pkey;
--Remove cluster marking
ALTER TABLE money.grocery SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze money.grocery;
-- Reporter schema bloat reduction
--reporter.materialized_simple_record
-- Cluster reporter.materialized_simple_record on primary key.
cluster verbose reporter.materialized_simple_record using materialized_simple_record_pkey;
--Remove cluster marking
ALTER TABLE reporter.materialized_simple_record SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze reporter.materialized_simple_record;
-----------------------------
-- extend_reporter.oclc_tracking
-- Cluster extend_reporter.oclc_tracking on primary key.
cluster verbose extend_reporter.oclc_tracking using mvlc_oclc_pkey;
--Remove cluster marking
ALTER TABLE extend_reporter.oclc_tracking SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze extend_reporter.oclc_tracking;
-- Show tables with dead tuples
select
schemaname,
relname,
n_dead_tup,
n_tup_ins,
n_tup_upd,
n_tup_del,
last_autoanalyze,
last_autovacuum,
autoanalyze_count,
*
from pg_stat_user_tables
--where last_autoanalyze is not null
WHERE n_dead_tup>0
order by n_dead_tup desc;
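-----------------------------
-- To review every table that has picked up per-table autovacuum or
-- fillfactor overrides (like the ones set throughout these notes), the
-- reloptions can be listed in one pass; a sketch:
SELECT n.nspname AS schemaname, c.relname, c.reloptions
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE c.reloptions IS NOT NULL
  AND n.nspname NOT IN ('pg_catalog', 'information_schema')
ORDER BY 1, 2;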
-- Vandelay schema bloat reduction
-- vandelay.queued_bib_record
-- Cluster vandelay.queued_bib_record on primary key.
cluster verbose vandelay.queued_bib_record using queued_bib_record_pkey;
--Remove cluster marking
ALTER TABLE vandelay.queued_bib_record SET WITHOUT CLUSTER;
--Vacuum Analyze
vacuum verbose analyze vandelay.queued_bib_record;