Skip to content

Instantly share code, notes, and snippets.

@duhaime
Last active July 12, 2019 18:54
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
Save duhaime/73adb2c45a2fb44c7b89169c36688f05 to your computer and use it in GitHub Desktop.
Download Yale Digital Collection Images
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time, os, json, glob
# Resume support: records/ holds one JSON file per scraped document, named
# "{page}-{index}.json". Scan it for the highest page number already fetched
# so a restarted run picks up roughly where the previous one stopped.
if not os.path.exists('records'):
    os.makedirs('records')

max_page_fetched = 0
for record_path in glob.glob('records/*.json'):
    page = int(os.path.basename(record_path).split('-')[0])
    max_page_fetched = max(max_page_fetched, page)
page_id = max_page_fetched

# Provisional upper bound; the real page count is read from the site's
# pagination widget on the first page load inside the crawl loop.
total_pages = page_id + 1

# first url is for hogarth, second is for all images
url = 'https://orbis.library.yale.edu/vwebv/search?searchArg1=Hogarth+william&argType1=all&searchCode1=NKEY&combine2=and&searchArg2=&argType2=all&searchCode2=GKEY&combine3=and&searchArg3=&argType3=all&searchCode3=GKEY&year=2018-2019&fromYear=&toYear=&location=.Lewis+Walpole+Library&place=all&type=k%3F&status=all&medium=all&language=all&content=all&media=all&carrier=all&recCount=50&searchType=2&page.search.search.button=Search'
url = 'http://findit.library.yale.edu/?f%5Bdigital_collection_sim%5D%5B%5D=Lewis+Walpole+Library&page={0}'

# Digit characters, used to strip the page number out of pagination text.
ints = [str(i) for i in range(10)]
driver = webdriver.Chrome()
# Crawl result pages until every page reported by the site's pagination
# widget has been visited. `total_pages` is refreshed on each page load,
# so the loop adapts if the result count changes mid-crawl.
while page_id < total_pages:
    print(' * processing', page_id)
    driver.get(url.format(page_id))
    pagination = driver.find_element_by_css_selector('.pagination')
    last_button = pagination.find_elements_by_css_selector('a')[-1]
    # determine the total number of pages for the current query: the last
    # pagination link's label contains the final page number
    total_pages = int(''.join(c for c in last_button.text if c in ints))
    print(' * total pages:', total_pages)
    results = driver.find_element_by_css_selector('#documents')
    # collect every result's detail-page href before navigating away —
    # the element handles go stale after the next driver.get()
    result_links = []
    for doc in results.find_elements_by_css_selector('.document'):
        result_links.append(doc.find_element_by_css_selector('a').get_attribute('href'))
    # navigate to the result for a single object
    for idx, result_url in enumerate(result_links):
        driver.get(result_url)
        # store for this document's metadata attributes
        metadata = {'url': result_url, 'images': []}
        meta_elem = driver.find_element_by_css_selector('#document')
        dts = meta_elem.find_elements_by_css_selector('dt')
        dds = meta_elem.find_elements_by_css_selector('dd')
        if len(dts) != len(dds):
            # warn on mismatched term/definition counts; zip() below pairs
            # only up to the shorter list instead of raising IndexError as
            # indexing range(len(dts)) would when dts is the longer list
            print(len(dts), len(dds))
        for dt, dd in zip(dts, dds):
            metadata[dt.text] = dd.text
        # find the images: sidebar anchors that point at the image server
        sidebar = driver.find_element_by_css_selector('#sidebar')
        for anchor in sidebar.find_elements_by_css_selector('a'):
            href = anchor.get_attribute('href')
            if 'imageserver' in href:
                metadata['images'].append(href)
        # store this record's information as records/{page}-{result}.json
        file_name = '{0}-{1}.json'.format(page_id, idx)
        with open(os.path.join('records', file_name), 'w') as out:
            json.dump(metadata, out)
    # go to the next page
    page_id += 1
# --- image downloader: fetch every image URL recorded in records/*.json ---
import glob, json, os
import urllib.request

if not os.path.exists('images'):
    os.makedirs('images')

for record_path in glob.glob('records/*.json'):
    with open(record_path) as f:
        record = json.load(f)
    base_name = os.path.basename(record_path).replace('.json', '')
    for kdx, image_url in enumerate(record['images']):
        out_path = os.path.join('images', '{0}-{1}.jpg'.format(base_name, kdx))
        # The image server's links end in .jpe; request the .jpg variant.
        # urllib.request replaces the original shelled-out wget call, which
        # interpolated a scraped URL into a shell string (injection risk)
        # and required wget to be installed. Failures are logged and the
        # loop continues, matching the original best-effort behavior of an
        # unchecked os.system call.
        try:
            urllib.request.urlretrieve(image_url.replace('.jpe', '.jpg'), out_path)
        except OSError as err:
            print(' ! failed to fetch', image_url, err)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment