Skip to content

Instantly share code, notes, and snippets.

@lelandbatey
Last active December 13, 2015 19:58
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save lelandbatey/4966247 to your computer and use it in GitHub Desktop.
Save lelandbatey/4966247 to your computer and use it in GitHub Desktop.
Short and dirty page scraper to tell you the different hats that are available on http://scrap.tf. Loops on a ten-second delay. If this in any way causes problems for the scrap.tf guys, I'll take it down. However, it's a super useful tool for other fellow hat-hounds.
#scrap.tf-scraper.py
import urllib2
from bs4 import BeautifulSoup
import time
def getItemDivs(url):
    """Fetch *url* and return the stringified children of the page's
    ``items-wrapper`` div.

    Returns a list of HTML strings. When the wrapper div is absent (not
    logged in, or the page layout changed), returns the one-element
    sentinel list ``['title="no hats :(']`` so the caller's
    ``title="..."`` extraction still yields a readable message.
    """
    opener = urllib2.build_opener()
    # scrap.tf requires a logged-in session, so the request must carry your
    # scrap.tf cookies. This javascript bookmarklet shows them:
    # javascript:void(document.cookie=prompt(document.cookie,document.cookie));
    opener.addheaders.append(("Cookie", "PASTE YOUR COOKIES HERE!"))
    page = opener.open(url).read()
    soup = BeautifulSoup(page)
    wrapper = soup.find('div', {'class': 'items-wrapper'})
    if wrapper is None:
        # Originally a bare except: hid every failure; the only expected
        # one is find() returning None, so check for that explicitly and
        # let real errors (network, parser) propagate.
        return ['title="no hats :(']
    return [str(child) for child in wrapper.findChildren()]
def getHatNames():
    """Scrape each bot page and return a list of available hat names."""
    # NOTE(review): "bot=9" appeared twice in the original list -- probably a
    # typo for "bot=10". Duplicate URLs are now skipped so the same page is
    # not fetched (and its hats listed) twice; confirm the intended bot id.
    urllist = ["http://scrap.tf/hats?bot=8", "http://scrap.tf/hats?bot=9", "http://scrap.tf/hats?bot=9"]
    hatArray = []
    seen = set()
    for url in urllist:
        if url in seen:
            continue
        seen.add(url)
        for div in getItemDivs(url):
            # The hat name lives in the div's title="..." attribute; skip
            # divs without one instead of raising IndexError on the split.
            if 'title="' in div:
                hatArray.append(div.split('title="')[1].split('"')[0])
    return hatArray
def main():
    """Print the currently available hats every ten seconds, forever.

    Runs until interrupted (CTRL-C).
    """
    while True:
        hats = getHatNames()
        print('')  # blank separator line between polls
        for hat in hats:
            print(hat)
        time.sleep(10)  # Poll interval in seconds; edit to taste.


# Guard the entry point so importing this module doesn't start the loop.
if __name__ == "__main__":
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment