Skip to content

Instantly share code, notes, and snippets.

#!/bin/bash
# Back up the dotfiles listed in dotfiles.txt (paths relative to $HOME)
# into $HOME/dotfiles.tar.gz, after an interactive confirmation.
echo -n "Starting backup... Are you sure ? y/(n) " ; read -r response
# Default to "n" on empty input; quoting protects against multi-word answers
# (unquoted, `test word1 word2 != y` is a syntax error).
test "${response:-n}" != "y" && exit 1
# Resolve paths relative to the script's own directory, not the caller's CWD;
# quote $0 in case the script lives under a path containing spaces.
dotfiles=$(dirname "$0")/dotfiles.txt
destination=$(dirname "$0")/home   # NOTE(review): unused in this excerpt — verify against full gist
# (re)create the dotfiles archive from the listed files
# (gzip archives cannot be appended to; tar -czf always rewrites the file)
tar -czf "$HOME/dotfiles.tar.gz" -C "$HOME" --files-from="$dotfiles"
-- Tables larger than ~10 MB whose statistics have not been refreshed
-- (manually or by autovacuum) in the last 5 days.
-- Use relid (the table's OID, provided by pg_stat_user_tables) instead of
-- relname::regclass: the bare-name cast raises an error for tables outside
-- the search_path and is ambiguous when the same relname exists in several
-- schemas.
SELECT schemaname,
       relname,
       pg_size_pretty(pg_relation_size(relid)) AS relsize,
       last_vacuum,
       last_autovacuum,
       last_analyze,
       last_autoanalyze
FROM pg_stat_user_tables
-- COALESCE keeps tables that have NEVER been analyzed (both timestamps NULL,
-- which the original comparison silently filtered out).
WHERE coalesce(greatest(last_autoanalyze, last_analyze), '-infinity') < now() - interval '5d'
  AND pg_relation_size(relid) > 10e6;
@fljdin
fljdin / check_cluster.sh
Last active June 21, 2019 16:07
Perform health check on clusterware and associated databases
#! /usr/bin/env bash
#
# comment : perform health check on clusterware and associated databases
# deploy : chmod +x /usr/local/bin/check_cluster
export version="20170426.0930"

# Output helpers: plain info, bold-blue section title, bold-green success.
cif () {
    printf "[INFO]\t%s\n" "$1"
}

ttl () {
    printf "\033[1;34m----\t%s\033[0m\n" "$1"
}

cok () {
    printf "\033[1;32m[OK]\033[0m\t%s\n" "$1"
}
@fljdin
fljdin / awr audit over past week.sql
Last active June 11, 2019 13:11
Get dbtime (seconds) from statspack or awr
-- Average DB Time (minutes per snapshot interval) per instance over the past week
select instance_number, avg(dbtime_min) avg_dbtime_min
from (
    -- Delta of the cumulative 'DB time' counter between consecutive snapshots.
    -- PARTITION BY instance_number so LAG never crosses an instance boundary:
    -- the original ORDER BY-only window subtracted one instance's counter
    -- from another's at each boundary, producing meaningless deltas.
    select a.instance_number, b.begin_interval_time, b.end_interval_time,
           round((a.value - lag(a.value)
                    over (partition by a.instance_number
                          order by a.snap_id)) / 1000000 / 60, 2) dbtime_min
    from dba_hist_sys_time_model a, dba_hist_snapshot b
    where a.snap_id = b.snap_id
      and a.instance_number = b.instance_number
      and a.stat_name = 'DB time'
      -- restrict to the past week, as the query's title states
      and b.begin_interval_time > sysdate - 7
)
-- GROUP BY is required: instance_number is not aggregated
group by instance_number;
-- Overall table (heap) cache hit ratio: fraction of heap block fetches
-- served from shared buffers rather than disk.
SELECT
  sum(heap_blks_read) as heap_read,
  sum(heap_blks_hit) as heap_hit,
  -- NULLIF guards the division on a freshly started or idle cluster where
  -- both counters are still zero (ratio is then NULL instead of an error).
  sum(heap_blks_hit) / NULLIF(sum(heap_blks_hit) + sum(heap_blks_read), 0) as ratio
FROM
  pg_statio_user_tables;
-- SQL*Plus report formatting for an ASM diskgroup/disk listing:
-- wide output, fixed column widths and number masks, and per-diskgroup
-- subtotals of total_mb/free_mb at each group break.
set lines 255
col path for a35
col Diskgroup for a15
col DiskName for a25
col disk# for 999
col total_mb for 999,999,999
col free_mb for 999,999,999
compute sum of total_mb on DiskGroup
compute sum of free_mb on DiskGroup
-- trailing "-" is the SQL*Plus line-continuation character; the rest of
-- this BREAK command is not visible in this excerpt
break on DiskGroup skip 1 on report -
@fljdin
fljdin / 10g_cluster_snap.sql
Last active February 8, 2019 14:17
Install statspack tools and schedules
-- run a snapshot on BOTH INSTANCES
EXEC perfstat.statspack.snap;
variable jobno NUMBER;
-- Raise the statspack snapshot level to 7 for every instance of the
-- cluster database.
-- NOTE(review): the END LOOP / closing of this call are truncated in this
-- excerpt — confirm against the full gist before running.
BEGIN
FOR inst IN (SELECT dbid, inst_id AS id FROM gv$database) LOOP
STATSPACK.MODIFY_STATSPACK_PARAMETER(
i_snap_level => 7,
i_dbid => inst.dbid,
i_instance_number => inst.id
# One-liner: print /proc/meminfo entries converted to whole GB, sorted
# descending, with hugepage- and memory-related lines highlighted by grep.
# Hugepage counts (plain numbers, no "kB" suffix) are multiplied by the
# Hugepagesize captured in `p`; lines with a zero value are skipped.
awk '/Hugepagesize:/{p=$2}/ 0 /{next}/ kB$/{v[sprintf("%9d GB %-s",int($2/1024/1024),$0)]=$0;next}{h[$0]=$2}END{for(k in v) print k;for (k in h) print sprintf("%9d GB %-s",p*h[k]/1024/1024,k)}' /proc/meminfo|sort -nr|grep --color=auto -E "^|.*(Huge.*_[TF]|Mem).*:|"
-- TX row-lock summary across all instances: session count and the longest
-- lock hold time in minutes, split by blocker status (block column).
SELECT COUNT(sid) sessions,
       block,
       MAX(ROUND(ctime / 60)) duration
FROM gv$lock
WHERE type = 'TX'
  AND lmode > 0
GROUP BY block;
@fljdin
fljdin / start_trace.sql
Created September 28, 2018 15:59
Add a new job with sql agent start schedule
-- Create the trace
-- Server-side SQL Trace setup: parameters for a rolling trace capturing
-- long-running statements.  (Script continues past this excerpt.)
DECLARE @TraceID int
DECLARE @stoptime_init datetime = NULL          -- NULL: trace runs until stopped manually
DECLARE @maxfilesize_init bigint = 50           -- presumably MB per trace file (sp_trace_create unit) — TODO confirm
DECLARE @filecount_init int = 20                -- rollover file count
DECLARE @tracefile_init nvarchar(200) = N'D:\MSSQL\TRACE\Trace_1second'
DECLARE @duration bigint = 1000000 -- microseconds
-- Stop and clear it if running
-- property = 2 of fn_trace_getinfo is the trace file path; match on our
-- configured file-name prefix to find a previous run of this same trace.
SELECT @TraceID = traceid FROM ::fn_trace_getinfo(NULL) WHERE property = 2 AND CONVERT(varchar, value) LIKE @tracefile_init + '%';