Skip to content

Instantly share code, notes, and snippets.

@vetri02
Created September 6, 2013 18:11
Show Gist options
  • Star 0 You must be signed in to star a gist
  • Fork 0 You must be signed in to fork a gist
  • Save vetri02/6467671 to your computer and use it in GitHub Desktop.
Save vetri02/6467671 to your computer and use it in GitHub Desktop.
From Neckbeard Republic: I learned this in the web-scraping section and adapted the code to the new GitHub Explore page layout.
import requests
from bs4 import BeautifulSoup
# URL of the GitHub Explore page that is scraped for trending repositories.
GITHUB_EXPLORE_PAGE = 'https://github.com/explore'
def get_html_explore():
    """Fetch the GitHub Explore page and return the raw response body.

    Returns:
        bytes: the HTML content of the Explore page.
    """
    # A timeout keeps the script from hanging forever on a stalled connection;
    # requests.get has no timeout by default.
    response = requests.get(GITHUB_EXPLORE_PAGE, timeout=10)
    return response.content
def parse_html(content):
    """Extract the trending-repository ``li`` elements from the Explore HTML.

    Args:
        content: raw HTML (bytes or str) of the GitHub Explore page.

    Returns:
        list: the ``li.collection-item`` tags, or ``[]`` when the expected
        ``div.repo-collection`` container is missing (page layout change).
    """
    # Name the parser explicitly; otherwise bs4 picks whichever library is
    # installed, which can change the parse tree between environments.
    soup = BeautifulSoup(content, 'html.parser')
    trending_repo_div = soup.find('div', {'class': 'repo-collection'})
    if trending_repo_div is None:
        # GitHub has changed this markup before (that is why this gist
        # exists); fail soft instead of raising AttributeError.
        return []
    # find_all is the modern spelling of the deprecated findAll alias.
    return trending_repo_div.find_all('li', {'class': 'collection-item'})
def repo_breakdown(repo):
    """Break one repository list item down into a dict of its details.

    Args:
        repo: a bs4 tag for one ``li.collection-item`` element, containing an
            ``a.repo-name`` anchor ("user/project") and two
            ``span.collection-stat`` counters (forks, stars — in that order).

    Returns:
        dict: keys ``user``, ``project``, ``forks``, ``stars`` (all strings).
    """
    header = repo.find('a', {'class': 'repo-name'})
    forks, stars = repo.find_all('span', {'class': 'collection-stat'})
    repo_details = header.text.split("/")
    return {
        'user': repo_details[0],
        'project': repo_details[1],
        'forks': forks.text,
        # BUG FIX: the original returned the literal string 'stars.text'
        # instead of the scraped star count.
        'stars': stars.text,
    }
def get_data(repos):
    """Convert each scraped repository tag into its detail dict.

    Args:
        repos: iterable of bs4 ``li`` tags (as returned by ``parse_html``).

    Returns:
        list[dict]: one detail dict per repository (see ``repo_breakdown``).
    """
    # Comprehension replaces the manual append loop (and drops the
    # misspelled 'treding_repos' local).
    return [repo_breakdown(repo) for repo in repos]
def main():
    """Scrape GitHub Explore and print the trending-repository details."""
    content = get_html_explore()
    repos = parse_html(content)
    # print() as a function is valid on both Python 2 and 3 for a single
    # argument; the original Python 2 print statement breaks on Python 3.
    print(get_data(repos))


if __name__ == '__main__':
    main()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment