Skip to content

Instantly share code, notes, and snippets.

@sney2002 sney2002/mget
Created May 6, 2011

Embed
What would you like to do?
script para descargar automáticamente archivos alojados en Megaupload
#!/usr/bin/env bash
# Copyright (c) 2011, Jhonathan Sneider Salguero Villa (http://www.novatoz.com/)
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE
# Megaupload's countdown is 60s, but in practice a small extra margin
# is needed before the download link becomes active.
WAIT=65
# ANSI color escapes for status messages.
GREEN="\033[0;32m"
RED="\033[0;31m"
END="\033[0m"
# Program name for usage messages ($0 quoted so paths containing
# spaces do not word-split).
PROG=$(basename "$0")
USAGE="Uso: $PROG [OPCIÓN]... [URL]"
HELP="$USAGE
Descarga automática de archivos alojados en Megaupload
ARGUMENTOS:
URL URL de megaupload
OPCIONES:
-h, --help mostrar este mensaje de ayuda
-i FILE, --input-file FILE
Obtener links desde un archivo de texto
-c URL, --crawler URL
Extraer links de archivo ubicado en URL"
# Extract the direct download link from a Megaupload page
# =======================================================
# $1 - full HTML of the download page.
# Prints the href of the anchor on the "download_regular_usual" line,
# or nothing when no such line exists or no argument is given.
get_link () {
  if [ $# -gt 0 ]; then
    # printf (not echo -e) so backslash sequences that happen to occur
    # in the HTML are passed to sed literally instead of being expanded.
    printf '%s\n' "$1" | sed -n '/download_regular_usual/ s:.*href="\([^"]*\).*:\1:p'
  fi
}
# Extract the domain name from a URL
# ==================================
# $1 - URL, with or without a leading http:// scheme.
# Prints everything after the optional http:// prefix and before the
# first slash.
get_domain_name () {
  if [ $# -gt 0 ]; then
    local host
    # Strip the scheme if present, then cut at the first slash.
    host=${1#http://}
    echo "${host%%/*}"
  fi
}
# Resolve a domain name to its IPv4 address
# =========================================
# $1 - host name to resolve.
# Sends a single ping probe and parses the dotted quad out of the
# "PING host (a.b.c.d) ..." header line.
# NOTE(review): depends on ping's header format and on the host being
# reachable/resolvable — verify on the target platform.
get_ip () {
  if [ $# -gt 0 ]; then
    # Quote "$1" so odd host strings cannot word-split or glob.
    ping -c1 "$1" | head -n 1 | sed "s:.*(\([0-9]*\.[0-9]*\.[0-9]*\.[0-9]*\)).*:\1:g"
  fi
}
# Show a countdown while waiting
# ==============================
# $1 - number of seconds to wait.
# Redraws "Por favor espere: N" in place once per second, counting
# from $1 down to 0, then prints a final newline.
please_wait () {
  local i
  # C-style loop instead of `seq` in backticks: no subshell, no fork.
  for (( i = $1; i >= 0; i-- )); do
    printf "\rPor favor espere: %-2d" "$i"
    sleep 1
  done
  echo
}
# Replace the domain name in a URL with its IP address
# ====================================================
# $1 - URL to rewrite.
# Prints the URL with the host part substituted by the address
# returned by get_ip.
dn_to_ip () {
  if [ $# -gt 0 ]; then
    # local: the originals leaked dn/ip into the global namespace.
    local dn ip
    dn=$(get_domain_name "$1")
    ip=$(get_ip "$dn")
    # NOTE(review): $dn is interpolated into the sed pattern, so its
    # dots match any character — harmless here since it substitutes
    # the same host string it was extracted from.
    echo "$1" | sed "s:$dn:$ip:"
  fi
}
# Filtrar links de Megaupload
# ===========================
link_filter () {
echo -e "$1" | sed 's:</a>:\n:gi' | sed -n 's_.*\(\(http://\)\?www.megaupload.com/?d=[a-z0-9]\+\).*_\1_igp' | uniq
}
# Download one file from Megaupload
# =================================
# $1 - Megaupload URL.
# Fetches the page, extracts the direct link, waits out the mandatory
# countdown, then downloads with wget -c (resumable).
download () {
  # 1. Fetch the Megaupload page (via its IP) and pull the direct link.
  echo -e "${GREEN}Procesando $1${END}"
  # Expansions are quoted throughout — the originals word-split on
  # any URL containing shell metacharacters.
  link_mega=$(dn_to_ip "$1")
  link=$(get_link "$(wget -O - "$link_mega" 2>/dev/null)")
  if [ "$link" ]; then
    # 2. Substitute the host name in the direct link with its IP.
    link=$(dn_to_ip "$link")
    # 3. Wait out the countdown before the link becomes active.
    please_wait "$WAIT"
    # 4. Decode HTML entities in non-ASCII file names by rendering the
    # link through w3m, e.g.
    #   before: http://www465.megaupload.com/files/f4c0e7f8f1f/Capacitaci&#00243;n.mp4
    #   after:  http://www465.megaupload.com/files/f4c0e7f8f1f/Capacitación.mp4
    link="$(echo "$link" | w3m -cols 1000 -T text/html -dump)"
    # 5. Download the file, resuming any partial copy.
    wget -c "$link"
  else
    echo -e "${RED}Error: Favor compruebe el link $1${END}"
  fi
}
# Only one mode of operation may be used per invocation
# =====================================================
case "$1" in
-i|--input-file)
  # Read links from a local text file.
  input=$(cat "$2" 2>&1)
  error=$?
  ;;
-c|--crawler)
  # Read links from a remote page.
  input=$(wget -O - "$2" 2>&1)
  error=$?
  ;;
*www.megaupload.com*)
  # Links given directly as arguments: one per line, without the
  # re-splitting that `echo $* | tr` performed.
  input=$(printf '%s\n' "$@")
  error=$?
  ;;
-h|--help)
  echo -e "$HELP"
  exit 0
  ;;
* )
  echo "$USAGE"
  echo "Intente $PROG --help"
  exit 1
  ;;
esac
# Numeric comparison (-ne), quoted: `[ $error != 0 ]` compared strings
# and broke if $error were ever empty.
if [ "$error" -ne 0 ]; then
  echo -e "$input"
  echo "Error: Intente $PROG --help"
  exit 1
fi
# Download every link found in the input.
for link in $(link_filter "$input"); do
  download "$link"
done
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
You can’t perform that action at this time.