-
-
Save shubhamcodez/0e51a5ee9a8d3115ddad96edaca7e1ac to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Make sure you pip install snscrape pandas
import snscrape.modules.twitter as sntwitter
import pandas as pd
from itertools import islice

# Enter usernames of the users whose tweets you need in the list.
users = ['elonmusk']
# Number of tweets per user you'd like. If a user has fewer than `limit`,
# all of their tweets get downloaded.
limit = 10000

for user in users:
    until = "2022-12-15"  # fetch tweets posted before this date
    since = "2022-01-01"  # fetch tweets posted on/after this date
    query = f"(from:{user}) until:{until} since:{since}"
    # islice caps the (potentially very long) scraper stream at `limit` items,
    # replacing a manual check-and-break inside the loop.
    tweets = [
        [
            tweet.user.displayname,
            tweet.user.username,
            tweet.url,
            tweet.date,
            tweet.content,
            tweet.retweetCount,
            tweet.likeCount,
        ]  # Instruction: add more tweet attributes here
        for tweet in islice(sntwitter.TwitterSearchScraper(query).get_items(), limit)
    ]
    df = pd.DataFrame(
        tweets,
        # Instruction: add a new name here for any column added above.
        columns=["Username", "User handle", "Tweet", "Date of posting",
                 "Text", "Retweet count", "Like count"],
    )
    df.to_csv(f"{user}tweets.csv")
script isn't working again
I am sorry to inform you that Twitter has updated its APIs, and none of the third-party scrapers will work.
script isn't working again
I am sorry to inform you that twitter has updated its APIs, and none of the third-party scrapers will work.
Yeah, snscrape still works though.
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
script isn't working again