# NOTE: GitHub "bidirectional Unicode" viewer notice removed — it was web-page
# boilerplate captured during extraction, not part of the code.
import pandas as pd
import datetime as dt
import time

# Tickers as listed on Yahoo Finance (Indian financial-sector stocks;
# .NS = NSE listing, .BO = BSE listing).
tickers = ['BAJFINANCE.NS', 'CHOLAFIN.NS', 'CUB.BO', 'DCBBANK.NS', 'DHANBANK.BO',
           'EDELWEISS.NS', 'HDFCBANK.NS', 'ICICIBANK.NS', 'IDFC.NS', 'KOTAKBANK.BO',
           'L&TFH.NS', 'MANAPPURAM.BO', 'MUTHOOTFIN.BO', 'RELCAPITAL.BO', 'SBIN.BO',
           'KTKBANK.BO', 'KARURVYSYA.NS', 'SOUTHBANK.BO', 'YESBANK.BO', 'MOTILALOFS.NS']

# Candle interval for the download: '1d' (daily) or '1mo' (monthly).
interval = '1d'
# --- next gist fragment (GitHub viewer boilerplate removed) ---
# NLP toolchain: NLTK for tokenizing/lemmatizing/stopword removal,
# VADER for rule-based sentiment scoring.
from nltk.tokenize import sent_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
import unicodedata
import nltk
import re
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer

# function
# --- next gist fragment (GitHub viewer boilerplate removed) ---
from bs4 import BeautifulSoup
import requests
import re
import pandas as pd

# Site root plus the category listing page to scrape (gin category, id 338).
baseurl = "https://www.thewhiskyexchange.com/"
url = "https://www.thewhiskyexchange.com/c/338/gin"

# Fetch the listing page; a timeout prevents the script hanging forever
# if the server never responds.
source = requests.get(url, timeout=30)
# Parse with the fast lxml parser (must be installed alongside bs4).
soup = BeautifulSoup(source.content, 'lxml')
# --- next gist fragment (GitHub viewer boilerplate removed) ---
# Importing the libraries
import snscrape.modules.twitter as sntwitter
import pandas as pd

# Twitter search query: tweets mentioning MSFT/Microsoft, English only,
# at least 100 likes, posted between 2022-01-01 and 2022-06-30.
query = "MSFT Microsoft min_faves:100 lang:en until:2022-06-30 since:2022-01-01"
# Accumulator the scraping loop below appends results to.
tweets = []

# A simple for loop
for tweet in sntwitter.TwitterSearchScraper(query).get_items(): |