Skip to content

Instantly share code, notes, and snippets.

@VolMi
Last active December 22, 2015 17:59
Show Gist options
  • Star 1 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save VolMi/6509677 to your computer and use it in GitHub Desktop.
Save VolMi/6509677 to your computer and use it in GitHub Desktop.
stream precious random data to all intelligence services in the universe
#!/bin/bash
# Bashisms are embraced here and therefore used quite heavily.
#
# This script is meant to keep some folks a bit busy, who were never invited
# by yourself to share your data.
# The idea is that you use cloud storage to store your encrypted (real) files
# and additionally generate some randomized junk which should be
# indistinguishable from the encrypted data.
# We now do this by encrypting the junk as well.
#
# Currently: "cloud storage" == "Dropbox"
#
# While your cloud provider offers only limited storage, your intelligence
# friends try to keep a copy of each version of each file ever uploaded, hence
# "removing" or "deleting" files has only a symbolic meaning.
#
# What it does
# ============
# We read data from /dev/urandom and store them in your decrypted cloud folder.
# The encrypted folder is synchronized with $YourCloudProvider.
# In order to be less obviously noticed as an automatically created pile of poo,
# we also randomize...
# ... the file size
# ... the file name
# ... the time between successive file operations
# ... the path of the file (up to 9 levels within your base folder)
# We pause the work if we run on battery or if $YourCloudProgram is busy
# or the computer is on a somewhat high load level (>= 10% of maximum).
# If this script is terminated, it tries to clean all the mess it created.
# This seems to work reliably in case of shutdown, kill, Ctrl+C, but sudden
# power losses or similar unpleasant events will lead to remaining
# crap in your folder.
#
# What you have to do
# ===================
# You need to look at the first few lines of the code to define...
# ... which folder to litter
# ... the maximum allowed size of data (hard limit of $YourCloudProvider)
# ... (optional) the range of filename lengths
# ... (optional) the allowed characters to be used for files and folders
#
# Improvements of the current version over first one
# ==================================================
# If $YourCloudProgram is getting busy synchronising real files, we detect this,
# remove our junk from the disk and free all resources for the real workload.
#
# By default, we encrypt the random data now instead of trying to create data
# that look as if they were encrypted.
#
#
# License: Public Domain
# Base folder to junkify
# ======================
# Should be the decrypted sync'ed folder.
# ${HOME} is used instead of the former hard-coded /home/${USER} so the
# script also works when the home directory is not located under /home.
#
baseDir_user="${HOME}/Dropbox" # decrypted
baseDir_encr="${HOME}/.dropbox_encr/Dropbox" # encrypted and sync'ed
if [ ! -d "$baseDir_user" ]; then
echo "The path \"$baseDir_user\" " >&2
echo "is not a valid directory. Change it manually in $0 !" >&2
exit 1
fi
# Dropbox total limit in kB
# =========================
# For N Gibibytes, use $((N*1024*1024)).
# We will use at most 10% of available free space, that is
# (szMaxKB - szCurrKB) / 10
#
szMaxKB=$((15*1024*1024)) # read: "size max in kB"
# Min/Max length of names of files and folders
# ============================================
# For me, it seems that in my EncFS folder, names are at
# least 24 characters long. To see and adapt it for yourself, try:
# ls -1 path/to/dropbox | awk '{print length, $0;}' | sort -nr
#
minLenFilename=24
maxLenFilename=80
# Allowed characters for files and folders
# ========================================
# (from observation in an EncFS encrypted folder)
# You can specify it manually, like so
#
# allowedChar='a-zA-Z0-9,_.-' # If you want "-", be sure to have it at the very end!
#
# or you let us determine which characters were already used in
# names of files/folders and only make sure which characters we
# should *not* use, like so:
#
disAllowedChar='~ +:()/äöüÄÖÜßñéèáàБсФfl' # If you want to exclude "-", be sure to have it at the very end!
# Collect every character already occurring in existing file/folder names:
# split the recursive listing into one character per line (grep -o .),
# drop the disallowed ones, deduplicate, and join the survivors on one line.
# The former `while read c; do echo -n "$c"; done` loop was replaced by
# `tr -d '\n'`: `read` without -r mangles backslashes, and the loop forked
# a subshell read per character for no benefit.
allowedChar="$(ls -R "$baseDir_user" | grep -o . | sed -e "s#[$disAllowedChar]##g" | sort -u | tr -d '\n')"
[ "$allowedChar" != "${allowedChar//-/}" ] && allowedChar=${allowedChar//-/} && allowedChar="${allowedChar}-" # remove & append "-"
# Actual beginning of the script
# ==============================
#
# Check dependencies
# ==================
# Verify every external tool we rely on is reachable. All missing tools are
# reported first so the user sees the complete list before we abort.
# `command -v` replaces the non-standard external `which` (POSIX builtin,
# no extra fork per tool).
for tool in bc dd dropbox grep head id pgrep ionice renice seq tr schedtool cachedel
do
if ! command -v "$tool" > /dev/null; then
echo "Program '$tool' not found in your path" >&2
if [ "$tool" = "cachedel" ]; then
echo "Search for the 'nocache' package to have cachedel available." >&2
echo >&2
fi
missingDep=yep
fi
done
if [ "$missingDep" ]; then
echo "Please make sure to have the missing dependencies met and try again." >&2
exit 3
fi
# Single-instance guard: abort when an older instance is already running.
# $0 and $SCRIPTNAME are now quoted so paths/names containing spaces work.
# NOTE(review): pgrep matches the kernel's 15-character truncated process
# name; scripts with very long names may not match --exact — confirm.
SCRIPTNAME=$(basename "$0")
if [ "$(pgrep --exact "$SCRIPTNAME")" ] && [ "$(pgrep --oldest "$SCRIPTNAME")" != "$$" ]
then
echo "Already found a running instance of the script. Abort." >&2
exit 0
fi
# Refuse to litter a directory we do not own.
if [ ! -O "$baseDir_user" ]; then
echo "The directory \"$baseDir_user\" " >&2
echo "is not yours!. Choose another one in $0 !" >&2
exit 2
fi
# Never run as root: the junk writer must not be able to touch system files.
if [ "$(id -u)" == 0 ]; then
echo 'DO NOT RUN THIS AS ROOT!' >&2
exit 4
fi
# Drop our own CPU and I/O priority to the minimum so junk generation never
# competes with real workloads. Failures are silently ignored on purpose:
# not every tool exists (or is permitted) on every system.
renice -n 19 -p $$ &> /dev/null # minimal CPU priority, usually available
schedtool -n 19 -D $$ &> /dev/null # try really, really minimal CPU priority
ionice -c 3 -p $$ &> /dev/null # minimal I/O priority
# Modifying related processes' priority
# =====================================
# You may want to let $YourCloudProgram have a lower CPU priority:
niceLevel=19
# NOTE(review): if several processes are named 'dropbox', pgrep emits
# multiple PIDs; the quoted "$dbPID" then passes them as a single argument
# and the renice/ionice calls fail silently — confirm this is acceptable.
dbPID="$(pgrep --exact dropbox)"
renice -n $niceLevel "$dbPID" &> /dev/null
schedtool -n 19 -D "$dbPID" &> /dev/null # try really, really minimal CPU priority
ionice -c2 -n7 -p "$dbPID" &> /dev/null
repeatPeriodically=true # true/false
RND_MAX=32767 # max. value of $RANDOM
# Strip a trailing '/' from baseDir_user with the shell's own parameter
# expansion instead of forking echo|sed for a one-character edit
# (the old `echo $baseDir_user` was also unquoted and would word-split).
baseDir_user="${baseDir_user%/}"
function isCloudClientIdle () {
# Succeed (exit 0) when `dropbox status` reports "idle" (case-insensitive).
# The previous `return $(cmd > /dev/null)` only worked by accident: the
# substitution expanded to nothing, so a bare `return` propagated the
# pipeline's status. Express the status test directly instead.
dropbox status | grep -Fqi idle
}
function isOnBattery () {
# Succeed only when a legacy ACPI battery directory exists AND the AC
# adapter state file reports 'off-line', i.e. we are running on battery.
if [ -d /proc/acpi/battery ]; then
grep -Fq 'off-line' /proc/acpi/ac_adapter/*/state
else
return 1
fi
}
function delayIfOnBattery () {
# Block while the machine runs on battery power. Prints one message when
# the wait starts and one when AC power returns; between checks it sleeps
# five seconds. $BeenHere doubles as the "we actually waited" flag.
BeenHere=""
while isOnBattery
do
if [ ! "$BeenHere" ]; then
echo 'On Battery. Waiting for AC power...'
fi
sleep 5
BeenHere=yes
done
# Kept as a short-circuit so the function's exit status matches the
# original (non-zero when no waiting happened; callers ignore it).
[ "$BeenHere" ] && echo 'AC plugged in now. Continue.'
}
function isHighLoad () {
# Conditions checked: RAM usage and CPU load.
# To find the current load, we call "uptime" and select the last number,
# which indicates the average load over the last 15 minutes.
# We normalize the load ourselves by the number of available CPUs, so
# load --> load / NumCPU
# Succeeds (exit 0) when either less than $freeMemMin of total memory is
# free or the normalized load exceeds $loadMaxPerc percent.
freeMemMin=1/2 # share of total memory that should be free
memTot="$(free -m | awk '/Mem:/ {print $2}')"
# NOTE(review): this parses the "-/+ buffers/cache:" line; newer procps-ng
# versions of `free` no longer print it, leaving memFree empty — confirm.
memFree="$(free -m | awk '/buffers.cache:/ {print $4}')"
loadMaxPerc=10
NumCPU=$(grep -c ^processor /proc/cpuinfo)
# Extract the trailing load figure; the second sed turns a locale comma
# into a decimal point so bc can parse it.
load="$(uptime | sed -e 's#.*\([0-9]\+[,.][0-9]*$\)#\1#' -e 's#,#.#')"
load=$(bc -l <<< "100 * $load / $NumCPU" | sed 's#\..*##')
# `return $(...)` works here because the substitution prints nothing and a
# bare `return` then propagates the compound test's exit status.
return $([ "$memFree" -lt "$((${memTot}*$freeMemMin))" ] || [ "$load" -gt "$loadMaxPerc" ])
}
function getRandomName () {
# Print a random name whose length is uniformly drawn from
# [minLenFilename, maxLenFilename], built only from $allowedChar using
# /dev/urandom as the entropy source. Exits 8 when the configuration
# globals are unset, 9 when the computed length is nonsensical.
if [ ! "$allowedChar" ] || [ ! "$minLenFilename" ] || [ ! "$maxLenFilename" ]; then
echo "min/max filename length and allowed characters must be specified in $0" >&2
exit 8
fi
# +1 so maxLenFilename itself can actually be produced (the old
# `RANDOM % (max - min)` topped out at max-1) and so that
# minLenFilename == maxLenFilename no longer divides by zero.
randLen="$(( maxLenFilename - minLenFilename + 1 ))"
currLen="$(( minLenFilename + RANDOM % randLen ))"
if [ "$currLen" -ge 1 ]; then
tr -dc "$allowedChar" < /dev/urandom | head -c "$currLen"
else
echo "Somehow, the current length for a random name is smaller than 1." >&2
echo "currLen = $currLen" >&2
exit 9
fi
}
function getRandomPath () {
# Print a random directory path like "/dir1/dir2/dir3" — at most $N_max
# levels deep, possibly completely empty, never ending in "/".
N_max=9
rpath=""
level=0
while [ "$level" -lt "$N_max" ]; do
level=$((level + 1))
# Probability of descending one more level grows with depth:
# prob(level) = level / (N_max + 1), rounded to a whole percent.
probPerc=$(bc -l <<< "100 * ${level}/($N_max + 1) + 0.5" | sed 's#\.[0-9]*##')
rand0_100=$(( RANDOM % 100 ))
if [ "$probPerc" -lt "$rand0_100" ]; then
break
fi
rpath="${rpath}/$(getRandomName)" || exit $?
done
echo "$rpath"
}
function cleanup () {
# Remove the junk file and the random directory tree it lives in.
# Reads the globals $fOut (file name), $rndPath (path below the base dir,
# starting with "/" or empty) and $baseDir_user. Exits 10 when the
# directory to delete looks unsafe.
# delete file
if [ "$fOut" ] && [ -f "${baseDir_user}${rndPath}/${fOut}" ]; then
rm -f "${baseDir_user}${rndPath}/${fOut}"
else
echo "No file with full path" >&2
echo "${baseDir_user}${rndPath}/${fOut}" >&2
echo "found... :-(" >&2
fi
if [ "${rndPath}" ]; then
# Reduce $rndPath to its topmost component ("/a/b/c" -> "/a") by
# repeatedly applying dirname while more than one "/" remains; that
# top-level directory is then removed recursively.
first="$rndPath"
while [ "$first" ] && [ 1 -lt $(grep -o '/' <<< "$first" | grep -c .) ]
do
#first="$(sed -e 's,\(/[^/]*\).*,\1,' <<< $rndPath)"
first="$(dirname $first)"
done
# Guard the rm -rf: never act on an empty path or on "/" itself.
if [ "$first" ] && [ "$first" != / ] && [ -d "${baseDir_user}${first}" ]; then
rm -rf "${baseDir_user}${first}"
else
echo "Tried to delete directory \"$first\"" >&2
echo "in \"$baseDir_user\"" >&2
echo "but a weird error occurred due to the bastard who implemented this... :-(" >&2
exit 10
fi
fi
}
function clearfilecache () {
# Evict the encrypted junk files from the kernel page cache (via nocache's
# `cachedel`) so they do not push more useful data out of RAM. Only runs
# while the dropbox client is idle, to avoid disturbing an active sync.
# `command -v` replaces the non-standard external `which`.
if command -v cachedel > /dev/null && dropbox status | grep -Fi idle > /dev/null; then
echo cache cleanup...
find "${baseDir_encr}" -type f -exec cachedel '{}' &> /dev/null \; # somehow, '+' instead of '\;' doesn't work
fi
}
# Make sure to be clean if we hit Ctrl+C or the process is killed.
# NOTE(review): the single '&' backgrounds pkill so cleanup runs in
# parallel with it — presumably intentional; confirm '&&' was not meant.
trap 'pkill -SIGTERM -P $$ & cleanup; exit 0' SIGABRT SIGINT SIGPIPE SIGTERM SIGQUIT
echo "We calmly start to litter ${baseDir_user}"
echo 'Allowed Characters for names of files and folders (everything within the " "):'
echo " \"${allowedChar}\""
echo
# Main loop: pause a random 0.5 .. ~16.9 seconds between iterations.
while sleep $(bc -l <<< "0.5+$RANDOM/2000")
do
# If the client is not idle *and* it is currently operating on !=1 file(s),
# then we are very likely doing real stuff with it!
# So clear the random junk and unlock all resources for real workload.
if [ "${fOut}" ] && [ 1 != "$(dropbox status | grep -c '^1')" ]
then
echo
echo 'Found "real" workload. Removing all random mess and wait until dropbox is idle again.'
cleanup || exit $?
fOut=""
continue
fi
# Block here while on battery power.
delayIfOnBattery
# Under high system load: drop our junk and skip this iteration.
# $beenHere suppresses repeated "Delaying..." messages.
if isHighLoad
then
isCloudClientIdle && [ ! "$beenHere" ] && echo -n "Delaying due to high load... " && beenHere=yo
[ "${fOut}" ] && cleanup
fOut=""
continue
fi
[ "$beenHere" ] && echo "continue." && beenHere=""
# Re-apply low priorities to the dropbox client each round, since the
# client may have been restarted with a new PID in the meantime.
if [ "$repeatPeriodically" == true ]
then
dbPID="$(pgrep --exact dropbox)"
renice -n $niceLevel "$dbPID" &> /dev/null
schedtool -n 19 -D "$dbPID" &> /dev/null
ionice -c2 -n7 -p "$dbPID" &> /dev/null
fi
clearfilecache # may be slow (lists all files in $baseDir_encr and runs cachedel on them)
if isCloudClientIdle
then
# Start a fresh junk file with probability ~1/6, or whenever no
# current file exists (first round, or it vanished on disk).
if [ "$RANDOM" -lt "$((${RND_MAX}/6))" ] || [ ! "${fOut}" ] || [ ! -f "${baseDir_user}${rndPath}/${fOut}" ]; then
echo making new file ...
# If we wrote to another file before, delete it now
if [ "${fOut}" ]; then
cleanup || exit $?
fi
# Build a random path below $baseDir_user:
rndPath="$(getRandomPath)" # It either starts with "/" or is completely empty.
# It does never have a "/" at the end.
while [ "$rndPath" ] && [ -d "${baseDir_user}${rndPath}" ]; do
echo "Collision found for folder $rndPath..." >&2
echo "Will make a new random path." >&2
rndPath="$(getRandomPath)"
done
mkdir -p "${baseDir_user}${rndPath}"
# random file name
fOut=$(getRandomName) || exit $?
fi
# Pick a random size of up to 10% of the remaining quota:
# (free kB) * (RANDOM+1)/32768 / 10.
szCurrKB=$(du -s "$baseDir_user" | awk '{print $1}')
if [ "$szMaxKB" -gt "$szCurrKB" ]; then
szTmpKB="$(( ($szMaxKB-$szCurrKB) * (${RANDOM}+1) / 32768 / 10))"
else
echo "Not enough free space in $baseDir_user" >&2
continue
fi
if [ "$fOut" ] && [ "$szTmpKB" ]
then
# NOTE(review): the inner "..." around .${rndPath}/${fOut} toggle
# quoting off for that part — looks unintended but is harmless here.
echo "$(date '+%Y %b %d, %H:%M:%S:') writing $((szTmpKB/1024)) MiB to ".${rndPath}/${fOut}" ..."
BS=4096 # block size
CNT=$(($szTmpKB * 1024 / $BS - 1)) # count
(
# First, write in BS-blocks (pretty fast)
dd if=/dev/urandom of="${baseDir_user}${rndPath}/${fOut}" bs="$BS" count="$CNT" 2>&1 | sed -n 's|[0-9]\+ Bytes.*|\0|p' 1>&2
# We don't want to be restricted to files with N*BS bytes, so append up to BS-1 bytes:
dd if=/dev/urandom bs=$(($RANDOM%$BS)) count=1 2>/dev/null >> "${baseDir_user}${rndPath}/${fOut}"
)
else
echo "Weird error. Check that..." >&2
echo "szTmpKB = $szTmpKB" >&2
echo "fOut = $fOut" >&2
exit 11
fi
fi
done
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment