@sleepdeprecation
Created March 18, 2013 06:07
Slight refactor of makeme.py ( https://gist.github.com/dkuntz2/5179398 )
#!/usr/bin/env python2
'''
Make Me (makeme): refactor
Essentially the same thing as makeme, but refactored slightly. Because.
'''
import logging
import re
import misaka as m
from datetime import datetime
from jinja2 import Environment, FileSystemLoader
from os import makedirs, mkdir, walk
from os.path import basename, exists, getmtime
from shutil import copy2
from yaml import load as yaml_load
# configuration
config = yaml_load(open('config.yml'))

# set base logging level: INFO by default, DEBUG if debug is true in the config file
logging.basicConfig(level=logging.INFO)
l = logging.getLogger('makeme')
l.setLevel(logging.INFO)
if config.get('debug') == True:
    l.setLevel(logging.DEBUG)
'''
The generic site maker - takes a dict of values for an individual site
and generates the nice and pretty things for it. Because that's important,
or something...
'''
def generate_site(site):
    l.debug("Generating site: " + site['url'] + " from " + site['path'])
    env = Environment(loader=FileSystemLoader(site['path'] + '/templates'))
    if not exists('./generated/' + site['url']):
        l.info('Creating ./generated/' + site['url'] + '/ directory')
        mkdir('./generated/' + site['url'])
    site['posts'] = parse_posts(site)
    site['archives'] = parse_archives(site)
    site['pages'] = parse_pages(site)
    make_site(site, env)
# parse out a file
def parse_file(filename):
    l.debug('Parsing file: ' + filename)
    parsed = {}
    with open(filename) as f:
        contents = f.read()
    # split the file into its YAML frontmatter and markdown body
    frontmatter, bodymatter = re.search(r'\A---\s+^(.+?)$\s+---\s*(.*)\Z', contents, re.M | re.S).groups()
    parsed['content'] = m.html(bodymatter)
    parsed.update(yaml_load(frontmatter))
    parsed['filename'] = basename(filename)
    parsed['changed'] = getmtime(filename)
    return parsed
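
# For reference, parse_file expects files shaped roughly like this
# (an illustrative example, not taken from any real site):
#
#   ---
#   title: Hello World
#   layout: post.html
#   ---
#   Markdown body goes *here*.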
####################
# POST THINGS
####################
# Parse all posts in a site...
def parse_posts(site):
    posts = []
    post_directory = './' + site['path'] + '/posts'
    for root, dirs, files in walk(post_directory):
        for f in files:
            posts.append(parse_post(root + '/' + f, site))
    posts = sorted(posts, key=lambda p: p['timestamp'], reverse=True)
    # set next/prev links (posts are sorted newest-first, so 'next' is the
    # newer neighbour and 'prev' the older one)
    for i in range(len(posts)):
        if i > 0:
            posts[i]['next'] = post_meat(posts[i - 1])
        if i < len(posts) - 1:
            posts[i]['prev'] = post_meat(posts[i + 1])
    return posts
# parse an individual post
def parse_post(filename, site):
    post = parse_file(filename)
    # pull the timestamp out of the filename, which should look like
    # YYYY-MM-DD-title.md or YYYY-MM-DD-HH-MM-title.md
    sp = post['filename'].split('-')
    try:
        d = datetime(int(sp[0]), int(sp[1]), int(sp[2]), int(sp[3]), int(sp[4]))
        num_p = 5
    except (IndexError, ValueError):
        l.debug("No hours/minutes")
        d = datetime(int(sp[0]), int(sp[1]), int(sp[2]))
        num_p = 3
    # everything after the date is the slug; drop the file extension
    slug = '-'.join(sp[num_p:]).rsplit('.', 1)[0]
    post['path'] = site['url'] + d.strftime('/%Y/%m/%d/') + slug
    post['timestamp'] = d
    # next/prev are filled in later by parse_posts
    post['next'] = None
    post['prev'] = None
    # set url
    post['url'] = 'http://' + post['path'] + '/'
    if 'layout' not in post:
        post['layout'] = 'post.html'
    return post
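
# e.g. a (hypothetical) file posts/2013-03-18-06-07-hello-world.md on a site
# whose url is 'example.com' ends up with:
#   post['path'] == 'example.com/2013/03/18/hello-world'
#   post['url']  == 'http://example.com/2013/03/18/hello-world/'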
# get the 'meat' of a post
def post_meat(post):
    return {'title': post['title'], 'url': post['url']}
# make the archives
def parse_archives(site):
    arc = {}
    for p in site['posts']:
        year = str(p['timestamp'].year)
        # zero-pad single-digit months so the keys match the generated paths
        month = '%02d' % p['timestamp'].month
        if year not in arc:
            arc[year] = {}
        if month not in arc[year]:
            arc[year][month] = []
        arc[year][month].append(p)
    return arc
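
# the resulting structure is nested dicts of post lists, e.g. (hypothetical):
#   {'2013': {'03': [post, post], '02': [post]}}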
##################
# Page Things
##################
# parse all pages in a site
def parse_pages(site):
    pages = []
    for root, dirs, files in walk('./' + site['path'] + '/pages/'):
        for f in files:
            pages.append(parse_page(root + '/' + f, site))
    return pages
# get the information from a page
def parse_page(filename, site):
    l.debug('Parsing the page ' + filename)
    page = parse_file(filename)
    page['path'] = site['url'] + '/' + '.'.join(page['filename'].split('.')[:-1])
    page['url'] = 'http://' + page['path'] + '/'
    if 'layout' not in page:
        page['layout'] = 'page.html'
    return page
###############
# THE MAGIC
###############
# start to put things together
def make_site(site, env):
    expand_posts(site, env)
    expand_pages(site, env)
    expand_index(site, env)
    copy_assets(site)
    if site.get('archives_on') == True:
        expand_archives(site, env)
# expand posts, writing each one to its own index.html
def expand_posts(site, env):
    for p in site['posts']:
        oPath = './generated/' + p['path']
        # do we need to remake the directory?
        l.debug(str(exists(oPath)) + " : " + oPath)
        if not exists(oPath):
            makedirs(oPath)
        writer = open(oPath + '/index.html', 'w')
        template = env.get_template(p['layout'])
        writer.write(template.render(site=site, post=p))
        writer.close()
def expand_archives(site, env):
    year_layout = 'archive_year.html'
    month_layout = 'archive_month.html'
    if 'archives_layouts' in site:
        if 'year' in site['archives_layouts']:
            year_layout = site['archives_layouts']['year']
        if 'month' in site['archives_layouts']:
            month_layout = site['archives_layouts']['month']
    year_template = env.get_template(year_layout)
    month_template = env.get_template(month_layout)
    for year, months in site['archives'].items():
        # the year/month directories already exist; expand_posts created them
        yearPath = './generated/' + site['url'] + '/' + str(year)
        writer = open(yearPath + '/index.html', 'w')
        writer.write(year_template.render(year=year, months=months, site=site))
        writer.close()
        for month, posts in months.items():
            monthPath = yearPath + '/' + str(month)
            writer = open(monthPath + '/index.html', 'w')
            writer.write(month_template.render(month=month, posts=posts, site=site))
            writer.close()
def expand_pages(site, env):
    for p in site['pages']:
        oPath = './generated/' + p['path']
        l.debug(str(exists(oPath)) + " : " + oPath)
        if not exists(oPath):
            makedirs(oPath)
        writer = open(oPath + '/index.html', 'w')
        template = env.get_template(p['layout'])
        writer.write(template.render(page=p, site=site))
        writer.close()
def expand_index(site, env):
    index = parse_file('./' + site['path'] + '/index.md')
    oFile = './generated/' + site['url'] + '/index.html'
    if 'layout' not in index:
        index['layout'] = 'page.html'
    writer = open(oFile, 'w')
    template = env.get_template(index['layout'])
    writer.write(template.render(page=index, site=site, content=index['content']))
    writer.close()
def copy_assets(site):
    if not exists('./' + site['path'] + '/assets'):
        l.debug('no assets in ' + site['path'])
        return
    newPath = './generated/' + site['url']
    for root, dirs, files in walk('./' + site['path'] + '/assets'):
        for d in dirs:
            ndir = (root + '/' + d).replace('./' + site['path'], newPath)
            if not exists(ndir):
                makedirs(ndir)
        for f in files:
            fPath = root + '/' + f
            nfPath = fPath.replace('./' + site['path'], newPath)
            # skip files whose copy is already newer than the source
            if exists(nfPath) and getmtime(nfPath) > getmtime(fPath):
                continue
            copy2(fPath, nfPath)
if __name__ == '__main__':
    if not exists('./generated/'):
        l.info('Creating ./generated/ directory')
        mkdir('./generated/')
    for name, site in config['sites'].items():
        generate_site(site)
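
# Usage: run `python2 makeme.py` from a directory containing config.yml
# and, under each site's path: templates/, posts/, pages/, index.md, and
# optionally assets/.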
# should be titled config.yml
#debug: true
sites:
  whimsy:
    path: whimsy
    url: whimsy.kuntz.co
    name: The Whimsy of Don Kuntz
    archives_on: true
    archives_layouts:
      year: archive_year.html
      month: archive_month.html
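
# with this config, output lands under ./generated/whimsy.kuntz.co/, roughly
# (illustrative dates and slugs):
#   generated/whimsy.kuntz.co/index.html
#   generated/whimsy.kuntz.co/<page>/index.html
#   generated/whimsy.kuntz.co/2013/index.html        (year archive)
#   generated/whimsy.kuntz.co/2013/03/index.html     (month archive)
#   generated/whimsy.kuntz.co/2013/03/18/<slug>/index.html
#   generated/whimsy.kuntz.co/assets/...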