@say4n
Last active May 9, 2017 06:37
Just a plain simple boring xkcd comic downloader with download progress indicator.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004
Copyright (C) 2016 Sayan Goswami <goswami.sayan47@gmail.com>
Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. You just DO WHAT THE FUCK YOU WANT TO.
"""
Use this Python 3 script to download all the comics from xkcd.com website.
Brought to you by Sayan (c) 2016
https://linkedin.com/in/sayan-goswami
"""
import requests
# import re
from bs4 import BeautifulSoup
import wget
def get_image_url(num):
"""Returns image URL from a number."""
url = 'http://www.xkcd.com/' + str(num)
r = requests.get(url)
data = r.text
if '404' in data:
print('Page not found for',num+'.')
exit()
soup = BeautifulSoup(data)
d = str(soup.find_all('div',{'id':'comic'})[0])
arr = d.split()
s = ''
for string in arr:
if 'src' in string:
s = string
s = s[7:-1]
return s
def download_image(img_url):
"""Downloads image using wget."""
arr = img_url.split('/')
wget.download('http://'+img_url,arr[-1])
print('\n\nDownloaded image for',arr[-1])
if __name__ == '__main__':
num = 1
while(True):
try:
download_image(get_image_url(num))
except Exception:
pass
num += 1
Lewiscowles1986 commented Oct 23, 2016

requirements.txt

beautifulsoup4==4.5.1
requests==2.11.1
wget==3.2

scrape.py

"""
    Use this Python 3 script to download all the comics 
    from xkcd.com website.
    Brought to you by Sayan (c) 2016
    https://twitter.com/codersayan
"""
import requests
from bs4 import BeautifulSoup
import wget

def get_image_url(num):
    """Returns image URL from a number."""
    url = 'http://www.xkcd.com/' + str(num)
    r = requests.get(url)
    data = r.text
    if '404' in data:
        print('Page not found for {}.'.format(num))
        exit()
    soup = BeautifulSoup(data, 'html.parser')
    d = str(soup.find_all('div',{'id':'comic'})[0])
    arr = d.split()
    s = ''
    for string in arr:
        if 'src' in string:
            s = string
            s = s[7:-1]
    return s

def download_image(img_url, num):
    """Downloads image using wget."""
    arr = img_url.split('/')
    filename = '{}_{}'.format(
        str(num).zfill(6),
        arr[-1].replace('(1)', '').replace('_.','.'))
    wget.download('http://'+img_url, filename)
    print('\n\nDownloaded image for', filename)

if __name__ == '__main__':
    num = 1
    while(True):
        try:
            download_image(get_image_url(num), num)
        except Exception:
            pass
        num += 1

say4n (Author) commented May 9, 2017

The JSON API would've saved a lot of work. Thanks a ton!
Apologies for being so late.
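
For reference, the JSON API mentioned here is xkcd's own endpoint at https://xkcd.com/<num>/info.0.json (with https://xkcd.com/info.0.json returning the latest comic). A minimal sketch of that approach, assuming the API's 'img' and 'num' fields and an illustrative filename scheme; no HTML parsing is required:

"""
Sketch: download xkcd comics via the JSON API instead of scraping HTML.
"""
import requests
import wget

def get_image_url_json(num):
    """Return the comic image URL for a given comic number via the JSON API."""
    r = requests.get('https://xkcd.com/{}/info.0.json'.format(num))
    if r.status_code == 404:
        return None  # comic does not exist (e.g. #404 is intentionally missing)
    return r.json()['img']

if __name__ == '__main__':
    # The un-numbered endpoint reports the latest comic number.
    latest = requests.get('https://xkcd.com/info.0.json').json()['num']
    for num in range(1, latest + 1):
        img_url = get_image_url_json(num)
        if img_url is None:
            continue
        wget.download(img_url, img_url.split('/')[-1])
        print('\nDownloaded comic #{}'.format(num))

Because the API reports the latest comic number, the loop can stop on its own instead of running forever and swallowing errors.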
