View target.py
import requests
from lxml import html
import re
from time import time
import json
import argparse
def get_store(store):
    # Pull the display name and opening hours out of one store record
    store_name = store['Name']
    store_timings = store['OperatingHours']['Hours']
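The target.py view stops right after these two lookups. A hypothetical helper showing how the hours list could be flattened for output; the keys 'Day', 'BeginTime' and 'EndTime' are assumptions about Target's store-locator JSON, not values taken from the script:
def format_hours(hours):
    # Hypothetical helper: turn each hours entry into "Day: open-close"
    return ', '.join('{0}: {1}-{2}'.format(h.get('Day'), h.get('BeginTime'), h.get('EndTime'))
                     for h in hours)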
View tripadvisor_scraper.py
#!/usr/bin/env python
from datetime import datetime
from time import time
from lxml import html,etree
import requests,re
import os,sys
import unicodecsv as csv
import argparse
def parse(locality,checkin_date,checkout_date,sort):
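tripadvisor_scraper.py imports argparse, so parse() is presumably driven from the command line. A minimal sketch of how such an entry point could be wired up; the argument names and help strings are assumptions, not copied from the script:
if __name__ == "__main__":
    argparser = argparse.ArgumentParser()
    argparser.add_argument('checkin_date', help='check-in date, e.g. 2024/05/20')
    argparser.add_argument('checkout_date', help='check-out date, e.g. 2024/05/22')
    argparser.add_argument('sort', help='sort order, e.g. popularity or price')
    argparser.add_argument('locality', help='locality to search, e.g. Boston')
    args = argparser.parse_args()
    # Scrape the hotel listing for the requested locality and dates
    parse(args.locality, args.checkin_date, args.checkout_date, args.sort)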
View tripadvisor_scraper_hotel.py
from lxml import html
import requests
from collections import OrderedDict
import pprint
import json
import argparse
import re
def parse(url):
    print("Fetching " + url)
View viator.py
import requests
import json
import unicodecsv as csv
import argparse
from argparse import RawTextHelpFormatter
def parse(location, sort):
    print("Retrieving Location Details")
    location_details_url = "https://www.viator.com/ajaxSegmentSearch.jspa?term=%s" % location
    location_response = requests.get(location_details_url).text
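The location term is interpolated directly into the query string; requests can URL-encode it instead, and json (already imported above) can decode the reply. A small sketch assuming the endpoint returns JSON:
def get_location_details(location):
    # Let requests build and URL-encode the query string
    response = requests.get("https://www.viator.com/ajaxSegmentSearch.jspa",
                            params={'term': location})
    try:
        return json.loads(response.text)
    except ValueError:
        # Not JSON: likely a blocked request or an HTML error page
        return None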
View amazon_reviews.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Written as part of https://www.scrapehero.com/how-to-scrape-amazon-product-reviews-using-python/
from lxml import html
import json
import requests
import re
from dateutil import parser as dateparser
from time import sleep
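Two of these imports hint at details of review scraping: dateutil copes with the free-form date strings found on review pages, and sleep throttles successive requests. A quick illustration, reusing the imports above, with an example date string (not scraped data):
review_date = dateparser.parse('July 4, 2019').date()   # -> datetime.date(2019, 7, 4)
sleep(2)   # pause between page fetches to avoid getting throttled or blocked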
View yelp_bussines_details.py
from lxml import html
import json
import requests
from time import sleep
import re,urllib
import argparse
def parse(url):
    # url = "https://www.yelp.com/biz/frances-san-francisco"
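Business pages like the commented example usually embed structured data as JSON-LD, which is easier to read than scattered XPath selectors. A standalone sketch of that approach; it is an alternative illustration, not necessarily how this script extracts its fields:
def extract_business_json(url):
    response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0'})
    tree = html.fromstring(response.text)
    # Yelp business pages typically carry a JSON-LD block with name, rating, address, etc.
    raw = tree.xpath('//script[@type="application/ld+json"]//text()')
    return json.loads(raw[0]) if raw else None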
View yelp_reviews_parser.py
from lxml import html
import unicodecsv as csv
import requests
from time import sleep
import re,urllib
import argparse
def parse(url):
    response = requests.get(url).text
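unicodecsv is imported as csv, which points at CSV output for the scraped reviews. A minimal sketch of writing review dicts to disk; the column names are assumptions:
def write_reviews(reviews, path='yelp_reviews.csv'):
    # unicodecsv wants a binary file handle and writes UTF-8 by default
    with open(path, 'wb') as f:
        writer = csv.DictWriter(f, fieldnames=['author', 'rating', 'date', 'review'],
                                quoting=csv.QUOTE_ALL)
        writer.writeheader()
        for review in reviews:
            writer.writerow(review)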
View amazon_product.py
from lxml import html
import csv,os,json
import requests
from time import sleep
def parse(url):
    # Spoof a desktop browser so Amazon serves the regular product page
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'}
    page = requests.get(url, headers=headers)
    for i in range(20):
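A single requests.get followed by for i in range(20) suggests a retry loop around the download, but the body is cut off here. A standalone sketch of that pattern; the captcha check and back-off interval are assumptions about how such a retry is usually handled:
def download_with_retries(url, retries=20):
    headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36'}
    for attempt in range(retries):
        page = requests.get(url, headers=headers)
        # Amazon answers blocked requests with captcha pages or non-200 codes
        if page.status_code == 200 and 'captcha' not in page.text.lower():
            return page
        sleep(3)   # back off briefly before retrying
    raise ValueError('could not fetch %s after %d attempts' % (url, retries))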
View zillow.py
from lxml import html
import requests
import unicodecsv as csv
import argparse
def parse(zipcode, filter=None):
    # Pick the Zillow search URL for this zip code based on the requested sort
    if filter == "newest":
        url = "https://www.zillow.com/homes/for_sale/{0}/0_singlestory/days_sort".format(zipcode)
View yelp_search.py
from lxml import html
import unicodecsv as csv
import requests
from time import sleep
import re
import argparse
def parse(url):
    response = requests.get(url).text