Skip to content

Instantly share code, notes, and snippets.

@ibraizQazi
Created January 25, 2018 11:49
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save ibraizQazi/bf43d878cc3ea8710d46a08d30e82aa4 to your computer and use it in GitHub Desktop.
Save ibraizQazi/bf43d878cc3ea8710d46a08d30e82aa4 to your computer and use it in GitHub Desktop.
Scraper to extract name and dosage from druginfosys.com
import sys
from urllib2 import urlopen, URLError
from argparse import ArgumentParser
from bs4 import BeautifulSoup
import csv
def parse_arguments():
    """Parse the command-line options for this scraper.

    Returns the argparse namespace; ``url`` (from -u/--url) is required.
    """
    arg_parser = ArgumentParser(description='Grabs tables from html')
    arg_parser.add_argument('-u', '--url', help='url to grab from',
                            required=True)
    return arg_parser.parse_args()
def parse_rows(rows):
    """Extract cell text from a list of table rows.

    rows: iterable of <tr> elements (BeautifulSoup Tags).
    Returns a list of lists of strings: for each row, its header
    cells (<th>) contribute one list and its data cells (<td>)
    contribute another, when present.
    """
    results = []
    for row in rows:
        # Header cells. The original searched for nested 'tr' here --
        # which a <tr> cannot contain, so it never matched -- and also
        # shadowed the loop variable `row` in the comprehension.
        # 'th' is the intended header-cell tag.
        header_cells = row.find_all('th')
        if header_cells:
            results.append([cell.get_text() for cell in header_cells])
        data_cells = row.find_all('td')
        if data_cells:
            results.append([cell.get_text() for cell in data_cells])
    return results
def save_data_csv(results, filename='drugsinfo.csv'):
    """Save scraped table data to a CSV file.

    results: iterable of rows, each an iterable of cell strings.
    filename: output path; defaults to 'drugsinfo.csv' so existing
        callers are unaffected.
    """
    # 'wb' is correct for the csv module under Python 2 (this script
    # uses Python 2 print statements / urllib2); it avoids extra blank
    # lines on Windows.
    with open(filename, 'wb') as f:
        writer = csv.writer(f)
        writer.writerows(results)
def main():
    """Fetch the command-line URL, scrape its first HTML table,
    save the rows to drugsinfo.csv and echo them to stdout.

    Returns 1 on fetch failure (propagated as the process exit
    status by the __main__ guard); returns None on success.
    Raises ValueError when the page contains no <table>.
    """
    # Get arguments
    args = parse_arguments()
    url = args.url
    try:
        resp = urlopen(url)
    except URLError as e:
        # NOTE(review): 'occured' typo kept as-is -- it is a runtime
        # string, not a comment.
        print 'An error occured fetching %s \n %s' % (url, e.reason)
        return 1
    soup = BeautifulSoup(resp.read(), "html.parser")
    # Get table -- only the FIRST <table> on the page is scraped.
    try:
        table = soup.find('table')
        rows = table.find_all('tr')
    except AttributeError as e:
        # soup.find() returned None: no <table> element in the document.
        raise ValueError("No valid table found")
    # Get data
    table_data = parse_rows(rows)
    # Save data to drugsinfo.csv in the working directory
    save_data_csv(table_data)
    # Print data, one row per line, cells tab-separated
    for i in table_data:
        print '\t'.join(i)
# Script entry point: run main() and use its return value as the process
# exit status (None -> 0 on success; 1 when the URL fetch failed).
if __name__ == '__main__':
    status = main()
    sys.exit(status)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment