Created
May 12, 2015 14:51
-
-
Save jkpl/6e9bb64795da8fa9c87c to your computer and use it in GitHub Desktop.
Macros for Poole
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import os | |
import json | |
import iso8601 | |
from datetime import datetime | |
from lesscss import LessCSS | |
# Defaults.  Poole injects the real `page` dict and `pages` list into this
# module at build time; these placeholders keep the module importable alone.
page = {}
pages = []

# Site info -- edit these three values for your own deployment.
site_url = 'http://your.site.org'
site_name = 'my homepage'
site_author = 'John Doe'

# Templates: old-style %-format strings filled in by the macros below.
SITE_TITLE = '%s « %s'  # (page title, site name)
HEAD_TITLE = '<a id="titlea" href="%s">%s</a>'  # (site url, site name)
PAGE_TITLE = '<h1 class="pagetitle"><a href="%s">%s</a></h1>'  # (short url, title)
TIME_TAG = '<time datetime="%s">%s</time>'  # (machine ISO stamp, display stamp)
CRUMB = '<li>» <a href="%s">%s</a></li>'  # (short url, title)
BREADCRUMBS = '<ul class="breadcrumbs">%s</ul>'  # (joined CRUMB items)
FILE_LINK = '[%s](%s) <span class="filesize">(%s)</span>'  # (label, path, size)
LIST_ITEM_LINK = '* [%s](%s)'  # (title, url)
LIST_ITEM_LINK_UPDATED = '* [%s](%s) (Updated: %s)'  # (title, url, date)
def site_title(page):
    """Build the window/browser title for *page*.

    The index page uses the bare site name; every other page gets the
    '<page title> « <site name>' form from SITE_TITLE.
    """
    if page['title'] != 'index':
        return SITE_TITLE % (page['title'], site_name)
    return site_name
def head_title():
    """Return the header anchor linking the site name to the site root."""
    link_parts = (site_url, site_name)
    return HEAD_TITLE % link_parts
def page_title(page):
    """Return an <h1> heading linking to *page*, or '' for the index."""
    if page['title'] == 'index':
        return ''
    return PAGE_TITLE % (page['u'], page['title'])
def render_last_modified(page, plain=False):
    """Render the page's last-modified ISO-8601 timestamp.

    With plain=True the raw ISO string is returned unchanged; otherwise
    it is wrapped in an HTML <time> element whose visible label is the
    'YYYY-MM-DD HH:MM' form of the same instant.
    """
    stamp = page['lastmodified']
    if plain:
        return stamp
    label = iso8601.parse_date(stamp).strftime('%Y-%m-%d %R')
    return TIME_TAG % (stamp, label)
def last_modified(page):
    """Return the page's source-file mtime as an ISO-8601 string."""
    stamp = os.path.getmtime(page['fname'])
    when = datetime.fromtimestamp(stamp)
    return when.isoformat()
def breadcrumbs(page):
    """Render the page's crumb trail as an HTML <ul>, or '' when empty."""
    items = '\n'.join(
        CRUMB % (crumb['u'], crumb['title']) for crumb in page['crumbs'])
    if not items:
        return ''
    return BREADCRUMBS % items
def filelink(filename, text=None):
    """Render a markdown link to *filename* annotated with its file size.

    *text* overrides the link label; it defaults to the file's basename.
    NOTE(review): `input` here is presumably the Poole-provided input
    directory global shadowing the builtin -- confirm against the runner.
    """
    label = text if text else os.path.basename(filename)
    size = filesize_to_string(os.path.getsize(os.path.join(input, filename)))
    return FILE_LINK % (label, filename, size)
def filesize_to_string(fsize):
    """Format a byte count as a short human-readable string.

    Returns '<x.xx>M' above 200000 bytes, '<x.xx>K' above 500 bytes,
    and the plain decimal byte count otherwise.

    Bug fixed: the original tested `fsize > 500` first, which made the
    `fsize > 200000` megabyte branch unreachable -- every large file was
    rendered in kilobytes.  The larger threshold must be checked first.
    """
    if fsize > 200000:
        # Divisor 1024000 (1000 KiB) kept from the original template.
        return '%0.2fM' % (fsize / 1024000.0)
    if fsize > 500:
        return '%0.2fK' % (fsize / 1024.0)
    return str(fsize)
def list_tag(tag):
    """Return a sorted markdown list of every page tagged *tag*."""
    entries = sorted(
        page_list_formatter(p) for p in pages if p.get('tag') == tag)
    return '\n'.join(entries)
def page_list_formatter(page):
    """Format one page as a markdown list-item link.

    Pages carrying an 'updated' value get the annotated template.
    """
    link_args = (page['title'], '/' + page['u'])
    when = page.get('updated')
    if when:
        return LIST_ITEM_LINK_UPDATED % (link_args + (when,))
    return LIST_ITEM_LINK % link_args
def make_shortpath(page):
    """Return the page URL with a trailing 'index.html' stripped off."""
    url = page['url']
    suffix = 'index.html'
    if url.endswith(suffix):
        return url[:-len(suffix)]
    return url
def make_breadcrumbs(page):
    """Collect the directory-index pages leading to *page*.

    The page itself is appended to the trail unless it is a directory
    index (its URL ends in 'index.html').  Missing ancestors (None from
    the generator) are dropped.
    """
    trail = [crumb for crumb in crumb_generator(page['url']) if crumb]
    if not page['url'].endswith('index.html'):
        trail.append(page)
    return trail
def crumb_generator(url):
    """Yield the directory-index page (or None) for each ancestor of *url*."""
    for prefix in subpath_generator(url):
        yield find_directory_index_page(prefix)
def subpath_generator(path):
    """Yield the cumulative ancestor paths of *path*, filename excluded.

    'a/b/page.html' yields 'a' then 'a/b'; a top-level 'page.html'
    yields nothing.

    Bug fixed: the original accumulator concatenated segments with no
    separator, so 'a/b/page.html' produced 'a', 'ab' -- the nested
    lookup for 'ab/index.html' then silently found no crumb page.
    """
    join = lambda prefix, segment: prefix + '/' + segment if prefix else segment
    return scanl(join, '', path.split('/')[:-1])
def find_directory_index_page(path):
    """Return the page whose URL is '<path>/index.html', or None."""
    target = path + '/index.html'
    return next((p for p in pages if p['url'] == target), None)
def hook_preconvert_add_attributes():
    """Poole pre-convert hook: attach derived attributes to every page.

    Adds 'u' (short URL), 'crumbs' (breadcrumb trail) and 'lastmodified'
    (source-file mtime) before markdown conversion runs.
    """
    for p in pages:
        p['u'] = make_shortpath(p)
        p['crumbs'] = make_breadcrumbs(p)
        p['lastmodified'] = last_modified(p)
def hook_postconvert_lesscss():
    """Poole post-convert hook: compile .less sources to compressed CSS.

    NOTE(review): `input`/`output` are presumably the Poole-provided
    source/destination directory globals (they shadow builtins) --
    confirm against the Poole runner.  Behavior is entirely delegated
    to the third-party LessCSS compiler.
    """
    LessCSS(media_dir=input, exclude_dirs=('files', 'js', 'img'),
            based=False, compressed=True, output_dir=output)
def hook_postconvert_jsondata():
    """Poole post-convert hook: dump each page as '<url>.json'.

    Crumb back-references are stripped from a clone first so that
    json.dump does not recurse through page->crumbs->page cycles.
    """
    for p in pages:
        target = os.path.join(output, p['url'] + '.json')
        serializable = clone_non_recursive_page(p)
        with open(target, 'w') as handle:
            json.dump(serializable, handle)
def clone_non_recursive_page(page):
    """Return a JSON-serializable shallow copy of *page*.

    Materializes the rendered `html` attribute into the dict copy and,
    when present, replaces the crumb list with copies stripped of their
    own crumb links (see clone_crumbs).
    """
    flat = page.copy()
    flat['html'] = page.html
    if 'crumbs' in flat:
        flat['crumbs'] = clone_crumbs(flat)
    return flat
def clone_crumbs(page):
    """Return copies of the page's crumbs with nested crumb lists removed."""
    def strip_crumbs(crumb):
        # Copy first so the live page objects keep their own breadcrumbs.
        flat = crumb.copy()
        flat.pop('crumbs', None)
        return flat
    return [strip_crumbs(crumb) for crumb in page['crumbs']]
def scanl(f, base, iterable):
    """Lazily yield the running left-fold of *f* over *iterable*.

    Like functools.reduce, but every intermediate accumulator is yielded
    (the seed *base* itself is not).
    """
    acc = base
    for element in iterable:
        acc = f(acc, element)
        yield acc
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment