Skip to content

Instantly share code, notes, and snippets.

@elhardoum
Last active December 30, 2017 20:47
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save elhardoum/c730ed31d6bd5da103eb38d3b2501951 to your computer and use it in GitHub Desktop.
import json
import urllib.request

# Load the full list of videos to download. items.json is produced by the
# companion scraper script and is a list of {'name': ..., 'url': ...} dicts.
with open('items.json') as json_data:
    items = json.load(json_data)

# Directory where the downloaded videos are saved (must end with a slash,
# since it is concatenated directly with the file name below).
save_to_dir = './'


def batch(startAt, stopAt):
    """Download every video whose 1-based position in ``items`` lies in the
    inclusive range [startAt, stopAt].

    Each item is fetched from ``item['url']`` and written to
    ``save_to_dir + item['name'] + '.mp4'``. Progress is printed per item.
    """
    for index, item in enumerate(items, start=1):
        # Skip anything outside the requested window.
        if index < startAt or index > stopAt:
            continue
        print('[%d] Downloading %s..' % (index, item['name']))
        # urllib.request.urlretrieve is the Python 3 replacement for the
        # long-deprecated urllib.URLopener().retrieve used originally.
        urllib.request.urlretrieve(item['url'], '%s%s.mp4' % (
            save_to_dir, item['name']
        ))


# download videos from 1 to 999
batch(1, 999)
import json
import urllib.request

# Wistia account-level REST API password; substitute a real token before use.
api_token = '<REST-TOKEN>'


def fetch_videos(page):
    """Return the raw JSON body (bytes) for one page of the Wistia
    medias listing (100 items per page), or None when the request fails.

    A None return is treated by the caller as the end of pagination, so any
    network/HTTP error ends the crawl rather than crashing it (best-effort).
    """
    url = 'https://api.wistia.com/v1/medias.json?per_page=100&page=%d&api_password=%s' % (
        page, api_token
    )
    try:
        # urllib.request.urlopen replaces the Python-2-only urllib2.urlopen.
        return urllib.request.urlopen(url).read()
    except Exception:
        # Deliberate best-effort: a failed page (timeout, 4xx/5xx) just
        # stops pagination; do not let it propagate.
        return None


items = []
for i in range(1, 99):
    print("Fetching videos at page %d.." % i)
    res = fetch_videos(i)
    if not res or not len(str(res)):
        print("Pagination has stopped at %d." % i)
        break
    data = json.loads(res)
    # Past the last page the API returns an empty JSON list ('[]'), which is
    # a non-empty string: check the parsed payload so the loop actually stops
    # instead of polling all remaining pages.
    if not data:
        print("Pagination has stopped at %d." % i)
        break
    for item in data:
        items.append({
            'name': item['name'],
            'url': item['assets'][0]['url']
        })

print('Successfully fetched %d items.' % len(items))
print('Saving to items.json file..')
with open('items.json', 'w') as outfile:
    json.dump(items, outfile)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment