Display media tweets sorted by likes or retweets
# -*- coding: utf-8 -*-
"""
Display media tweets sorted by likes or retweets.

What you need:
- A Twitter Consumer Key and Consumer Secret
- The full tweet history CSV (requested via "Your Twitter archive" at the
  bottom of the official Twitter "Settings > Account" page)

Packages to install:
- arrow
- tweepy
"""
import csv
import datetime
import arrow
import tweepy
CONSUMER_KEY = '<Your Consumer Key>'
CONSUMER_SECRET = '<Your Consumer Secret>'
csvpath = 'path/to/tweets.csv'
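# Note: the archive's tweets.csv is assumed to contain at least the columns
# used below: 'tweet_id', 'timestamp' and 'expanded_urls'.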


def timestamp2arrow(timestamp):
    """Parse a timestamp string from the archive CSV into an Arrow object."""
    return arrow.get(timestamp, 'YYYY-MM-DD HH:mm:ss Z')
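# Example: timestamp2arrow('2016-01-01 00:00:00 +0900') returns an Arrow
# instance equivalent to 2016-01-01T00:00:00+09:00; the same format is used
# for the `since` argument in the __main__ block below.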


class CsvParser(object):
    """Iterate over the tweet archive CSV and pick out media (photo) tweets."""

    def __init__(self, path, screen_name):
        self.path = path
        self.screen_name = screen_name

    def filter_images_by_since(self, since):
        """Yield CSV rows for photo tweets posted at or after `since`."""
        since_dt = timestamp2arrow(since)
        with open(self.path) as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                if since_dt <= timestamp2arrow(row['timestamp']):
                    url = "https://twitter.com/{screen_name}/status/{tweet_id}/photo/1".format(
                        screen_name=self.screen_name,
                        tweet_id=row['tweet_id'],
                    )
                    # Photo tweets have their .../photo/1 URL recorded in the
                    # 'expanded_urls' column, so an exact match identifies them.
                    if url == row['expanded_urls']:
                        yield row
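# Example (matching the __main__ block below):
#   parser = CsvParser('path/to/tweets.csv', screen_name='kk6')
#   rows = parser.filter_images_by_since('2016-01-01 00:00:00 +0900')
# yields the archive rows for photo tweets posted since the start of 2016.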


def fetch_tweet_data(api, media_tweets, text_truncate_to=30):
    """Look up each media tweet via the Twitter API and yield a summary dict."""
    for row in media_tweets:
        tweet = api.get_status(id=row['tweet_id'])
        yield {
            'tweet_id': tweet.id,
            'likes': tweet.favorite_count,
            'retweets': tweet.retweet_count,
            'text': tweet.text.replace('\n', '')[:text_truncate_to],
            # created_at from the API is UTC; convert to Japan time (Asia/Tokyo).
            'created_at': arrow.get(tweet.created_at).to('Asia/Tokyo').format('MM/DD HH:mm'),
        }
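# Note: this issues one statuses/show call per media tweet, so a large archive
# can hit the API rate limit; wait_on_rate_limit=True below makes tweepy wait
# and retry instead of failing.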


def sort_by(key, data_list, reverse=False):
    """Sort the tweet summaries by the given key ('likes', 'retweets', ...)."""
    return sorted(data_list, key=lambda d: d[key], reverse=reverse)


def display_data(data_list, limit=10):
    """Print at most `limit` entries, one tweet per line."""
    for i, data in enumerate(data_list):
        if i >= limit:
            break
        print("ID:{tweet_id}, likes:{likes}, RTs:{retweets}, date:{created_at}, text:{text}".format(**data))


if __name__ == '__main__':
    parser = CsvParser(csvpath, screen_name='kk6')
    media_tweets = parser.filter_images_by_since('2016-01-01 00:00:00 +0900')
    auth = tweepy.AppAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
    api = tweepy.API(auth, wait_on_rate_limit=True)
    data_list = fetch_tweet_data(api, media_tweets)
    sorted_data = sort_by('likes', data_list, reverse=True)
    display_data(sorted_data, 100)
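    # To rank by retweet count instead of likes, for example:
    #   sorted_data = sort_by('retweets', data_list, reverse=True)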