Hacker News Top 1,000 Page Generator
#! /usr/bin/env python
"""Hacker News Top 1,000 Page Generator.
First grab the top 1,000 submissions as JSON files using the HN API, i.e.
curl -g 'http://api.thriftdb.com/api.hnsearch.com/items/_search?pretty_print=true&filter[fields][type]=submission&sortby=points+desc&limit=100' > 1.json
curl -g 'http://api.thriftdb.com/api.hnsearch.com/items/_search?pretty_print=true&filter[fields][type]=submission&sortby=points+desc&limit=100&start=100' > 2.json
curl -g 'http://api.thriftdb.com/api.hnsearch.com/items/_search?pretty_print=true&filter[fields][type]=submission&sortby=points+desc&limit=100&start=200' > 3.json
... all the way up to ...
curl -g 'http://api.thriftdb.com/api.hnsearch.com/items/_search?pretty_print=true&filter[fields][type]=submission&sortby=points+desc&limit=100&start=800' > 9.json
curl -g 'http://api.thriftdb.com/api.hnsearch.com/items/_search?pretty_print=true&filter[fields][type]=submission&sortby=points+desc&limit=100&start=900' > 10.json
Then, run this script in the same directory as the JSON files to generate the
HN Top 1,000 items as an HTML page!
"""
from datetime import datetime
from json import loads as decode_json
comments = []
dataset = []
months = ['', 'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
          'aug', 'sep', 'oct', 'nov', 'dec']
days = ['', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th',
        '11th', '12th', '13th', '14th', '15th', '16th', '17th', '18th', '19th', '20th',
        '21st', '22nd', '23rd', '24th', '25th', '26th', '27th', '28th', '29th', '30th',
        '31st']
def parse(data, append=dataset.append, rank=comments.append):
    for result in decode_json(data)['results']:
        item = result['item']
        append((
            item['id'], item['create_ts'].split('T')[0], item['domain'],
            item['num_comments'], item['points'], item['title'], item['url'],
            item['username']
            ))
        rank((item['num_comments'], item['id']))
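# For reference, each entry in `results` is expected to look roughly like this
# (illustrative values only; parse() relies solely on the fields shown):
#
#   {"item": {"id": 1, "create_ts": "2007-02-19T00:00:00Z",
#             "domain": "ycombinator.com", "num_comments": 15, "points": 57,
#             "title": "Y Combinator", "url": "http://ycombinator.com",
#             "username": "pg"}}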
def print_rank(n):
    if n < 20:
        return ['', '1st', '2nd', '3rd', '4th', '5th', '6th', '7th', '8th', '9th', '10th',
                '11th', '12th', '13th', '14th', '15th', '16th', '17th', '18th', '19th', '20th'][n]
    if n % 100 in (11, 12, 13):
        # 111, 212, 913, ... end in a teen and always take 'th'.
        return '%dth' % n
    d = n % 10
    if d == 1:
        return '%dst' % n
    if d == 2:
        return '%dnd' % n
    if d == 3:
        return '%drd' % n
    return '%dth' % n
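# e.g. print_rank(1) == '1st', print_rank(22) == '22nd', print_rank(103) == '103rd',
# print_rank(111) == '111th'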
for i in range(1, 11):
    f = open('%s.json' % i, 'rb')
    parse(f.read())
    f.close()
ranks = {}
for idx, i in enumerate(reversed(sorted(comments))):
    ranks[i[1]] = print_rank(idx + 1)
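# `ranks` now maps each submission id to its ordinal position when the items are
# sorted by comment count, descending, e.g. the most-commented story's id maps to
# '1st'. It is only used for the "(Nth)" annotation next to the comments link.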
# print dataset
html = []; out = html.append
out("""<!doctype html><meta charset=utf-8><title>Hacker News Top 1000</title><style>
body {
  background: #fff;
  font-family: Verdana;
}
a {
  text-decoration: none;
  color: #000;
}
a:hover {
  text-decoration: underline;
}
a:visited {
  color: #828282;
}
#heading {
  background: #ff6600;
  margin-bottom: 5px;
  padding: 5px;
}
#logo {
  border: 1px solid #fff;
  vertical-align: bottom;
}
#main {
  background: #f6f6ef;
  margin: 10px auto 20px auto;
  width: 800px;
}
#title {
  font-weight: bold;
}
.clear {
  clear: both;
}
.domain {
  color: #828282;
  font-size: 0.8em;
  padding-left: 5px;
}
.item {
  padding: 5px 10px 5px 15px;
}
.rank {
  float: left;
  font-size: 0.5em;
  padding: 1px 1px 2px 1px;
  margin: 9px 4px 4px 4px;
  text-align: center;
  width: 20px;
}
.subline {
  color: #828282;
  font-size: 0.7em;
  margin-top: 3px;
  margin-left: 17px;
}
.subline a {
  color: #828282;
}
</style><body><div id=main>
<div id=heading><img id=logo src="https://news.ycombinator.com/y18.gif">
<span id=title>Hacker News Top 1,000</span></div>""")
i = 1
for (id, ts, domain, comments, points, title, url, username) in dataset:
    year, month, day = map(int, ts.split('-'))
    item_url = "https://news.ycombinator.com/item?id=%d" % id
    out(
        '<div class=rank>%d</div><div class=item><a href="%s">%s</a>'
        '<span class=domain>(%s)</span><div class=subline>%d points by '
        '<a href="https://news.ycombinator.com/user?id=%s">%s</a> on %s %s, %s | '
        '<a href="%s">%d comments (%s)</a></div></div><div class=clear></div>' % (
            i, url or item_url, title, domain or 'Ask HN', points, username,
            username, days[day], months[month], year, item_url, comments,
            ranks[id]))
    i += 1
out('</div>')
out('<!-- Generated using the HN API at https://www.hnsearch.com/api on %s -->' % datetime.utcnow().strftime('%c'))
f = open('hacker-news-top-1000.html', 'wb')
f.write(''.join(html).encode('utf-8'))
f.close()
print "DONE!"