Created
December 31, 2014 03:39
-
-
Save vladris/8fe3755bb60c9f46fdc2 to your computer and use it in GitHub Desktop.
Tumblr migration script to move content from one blog to another
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import pytumblr | |
import yaml | |
import os | |
import urllib | |
# stolen from interactive_console.py, because I'm too lazy to oauth
# you'll need to register an app with Tumblr and use interactive_console.py for initial oauth
def create_client():
    """Build an authenticated pytumblr.TumblrRestClient from tokens in ~/.tumblr.

    The ~/.tumblr YAML file is the one interactive_console.py writes after the
    initial OAuth dance; it must contain consumer_key, consumer_secret,
    oauth_token and oauth_token_secret.

    Raises:
        Exception: if ~/.tumblr does not exist (authenticate first).
    """
    yaml_path = os.path.expanduser('~') + '/.tumblr'
    if not os.path.exists(yaml_path):
        raise Exception('Need to authenticate, use interactive_console.py')
    # context manager guarantees the token file is closed even if parsing fails
    # (the original leaked the handle when yaml.safe_load raised)
    with open(yaml_path, "r") as yaml_file:
        tokens = yaml.safe_load(yaml_file)
    return pytumblr.TumblrRestClient(
        tokens['consumer_key'],
        tokens['consumer_secret'],
        tokens['oauth_token'],
        tokens['oauth_token_secret']
    )
# download all posts of a given type, without callers having to worry about the offset+limit
def get_posts(blog, post_type):
    """Yield every post of *post_type* on *blog*, paging through the API transparently.

    Keeps requesting pages (advancing the offset by however many posts the
    API returned) until an empty page signals the end.
    """
    fetched = 0
    while True:
        page = client.posts(blog, post_type, offset=fetched)["posts"]
        if not page:
            return
        for entry in page:
            yield entry
        fetched += len(page)
# cleanup blog (very useful while I'm writing this script and figuring out where posting fails)
def cleanup(blog, post_type):
    """Delete every post of *post_type* from *blog*.

    The post list is materialized up front so we are not deleting from the
    blog while the paging generator is still iterating over it.
    """
    print("Cleaning up %s posts on %s" % (post_type, blog))
    doomed = list(get_posts(blog, post_type))
    for post in doomed:
        # BUG FIX: the original deleted from the module-level new_blog no
        # matter which blog was passed in; delete from the blog argument.
        client.delete_post(blog, post["id"])
    print("Done")
# retrieve relevant data from all posts of given type
def post_list(blog, post_type, keys):
    """Return a list of dicts holding only *keys* from each post of *post_type* on *blog*.

    String values are UTF-8 encoded; non-string values (e.g. the tags list)
    are kept unchanged.
    """
    results = []  # renamed: the original local shadowed the function's own name
    for post in get_posts(blog, post_type):
        post_data = {}
        for key in keys:
            value = post[key]
            try:
                value = value.encode("utf-8")
            except AttributeError:
                # not a string (no .encode) -- keep as-is; the original's bare
                # except would have hidden any unrelated error too
                pass
            post_data[key] = value
        results.append(post_data)
    return results
client = create_client()
old_blog = "your-old-blog"
new_blog = "your-new-blog"
# migrate text posts
for post in post_list(old_blog, "text", ["title", "body", "date", "tags"]):
    print(client.create_text(new_blog, title=post["title"], body=post["body"], date=post["date"], tags=post["tags"]))
# migrate quote posts
for post in post_list(old_blog, "quote", ["text", "source", "date", "tags"]):
    print(client.create_quote(new_blog, quote=post["text"], source=post["source"], date=post["date"], tags=post["tags"]))
# migrate audio posts
for post in post_list(old_blog, "audio", ["caption", "audio_url", "date", "tags"]):
    print(client.create_audio(new_blog, caption=post["caption"], external_url=post["audio_url"], date=post["date"], tags=post["tags"]))
# migrate video posts
for post in post_list(old_blog, "video", ["caption", "player", "date", "tags"]):
    print(client.create_video(new_blog, caption=post["caption"], embed=post["player"][0]["embed_code"], date=post["date"], tags=post["tags"]))
# migrate link posts
for post in post_list(old_blog, "link", ["title", "url", "description", "date", "tags"]):
    print(client.create_link(new_blog, title=post["title"], url=post["url"], description=post["description"], date=post["date"], tags=post["tags"]))
# migrate photo posts (original comment said "link" -- copy/paste slip)
if not os.path.isdir("./images"):
    os.makedirs("./images")  # urllib.urlretrieve fails if the target directory is missing
for i, post in enumerate(post_list(old_blog, "photo", ["photos", "caption", "date", "tags"])):
    # hacky way of downloading all images and re-uploading them
    img_path = "./images/%d.jpg" % (i,)
    if not os.path.exists(img_path):
        # BUG FIX: post is a dict keyed by field name, so post[0][0] raised
        # KeyError; the photo list lives under the "photos" key.
        url = post["photos"][0]["original_size"]["url"]
        print("Downloading %s" % (url,))
        urllib.urlretrieve(url, img_path)
    print(client.create_photo(new_blog, caption=post["caption"], data=img_path, date=post["date"], tags=post["tags"]))
# note I didn't have any chat posts to worry about
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment