@mikeyk
Created December 24, 2008 11:26
#!/usr/local/bin/python2.5
# CGI script: given a tweet's status URL, walk the user's timeline pages
# until the tweet is found, then emit JavaScript that redirects the browser
# to the matching page of that user's profile.
from BeautifulSoup import BeautifulSoup
import urllib
import cgitb
import cgi

cgitb.enable()

MAX_PAGES = 50

def find_pagination_for_id(user, target_id):
    """Return the profile page URL containing target_id, or None if not found."""
    if not target_id:
        return None
    counter = 1
    while counter <= MAX_PAGES:
        timeline = urllib.urlopen("http://twitter.com/statuses/user_timeline/%s.xml?page=%s" % (user, counter))
        soup = BeautifulSoup(timeline.read())
        for tweet_id in soup('id'):
            if tweet_id.contents[0] == target_id:
                return "http://twitter.com/%s/?page=%s" % (user, counter)
        counter += 1
    return None

def get_user_and_id_from_url(url):
    """Return a (user, tweet_id) tuple parsed from a status URL."""
    if not url:
        return None
    url_split = url.split('/')
    user = url_split[3]
    target_id = url_split[5]
    return (user, target_id)

parameters = cgi.FieldStorage()
if "url" in parameters:
    url = parameters['url'].value
    user, target_id = get_user_and_id_from_url(url)
    redirect_url = find_pagination_for_id(user, target_id)
    print 'Content-type: text/javascript\n'
    print
    if redirect_url:
        print "window.location.href = '%s';" % (redirect_url)
    else:
        print "alert('Sorry, this tweet was beyond the page limit');"