Skip to content

Instantly share code, notes, and snippets.

@UVClay
Created April 20, 2017 17:19
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save UVClay/8896e89a160ba28470a44fbdc4d0004d to your computer and use it in GitHub Desktop.
Uhh Yeah Dude episode downloader
#!/usr/bin/python3
#uyd archive scraper
from htmldom import htmldom
import urllib
links = ""
epcount = 2
try:
countpage = htmldom.HtmlDom("http://archive.uhhyeahdu.de/episodes/").createDom()
epcount = countpage.find("div.page-info").children( "b", True ).eq(1).text()
except:
print("Couldn't connect to UYD archive")
exit()
for x in range(1,int(epcount) - 1):
try:
page = htmldom.HtmlDom( "http://archive.uhhyeahdu.de/episodes/episode-" + str(x)).createDom()
link = page.find("a[title='Download']").attr("href")
if link[0] == "/":
link = "http://archive.uhhyeahdu.de" + link
print("found link " + link)
links += link + "\n"
try:
urllib.request.urlretrieve(link, "./uyd/uyd-episode"+str(x)+".mp3")
print("downloaded uyd-episode"+str(x)+".mp3")
except:
print("couldn't download episode " + str(x))
except:
print("Error fetching " + str(x))
f = open( "download_links.txt", mode = "w")
f.write(links)
f.close()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment