@tonyfast
Last active December 11, 2019 04:01
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/tonyfast/xxxxx/tonyfast/poser/2019-11-22-poser_recipes.ipynb:12: DeprecationWarning: invalid escape sequence \\c\n",
" '''\n",
"/Users/tonyfast/anaconda3/lib/python3.7/site-packages/IPython/core/interactiveshell.py:473: ProvisionalWarning: `enable_html_pager` is provisional since IPython 5.0 and might change in future versions.\n",
" warn(\"`enable_html_pager` is provisional since IPython 5.0 and might change in future versions.\", ProvisionalWarning)\n"
]
},
{
"data": {
"text/markdown": [
" if __name__ == '__main__': from pidgin_programming import *"
],
"text/plain": [
" if __name__ == '__main__': from pidgin_programming import *"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
" if __name__ == '__main__': from pidgin_programming import *"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
" import re, textwrap, tokenize, io, itertools, IPython; from toolz.curried import *\n",
" __all__ = 'parse',"
],
"text/plain": [
" import re, textwrap, tokenize, io, itertools, IPython; from toolz.curried import *\n",
" __all__ = 'parse',"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
" import re, textwrap, tokenize, io, itertools, IPython; from toolz.curried import *\n",
" __all__ = 'parse',"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
" class Lexer(__import__('mistune').BlockLexer):\n",
" def parse(self, text, rules=None):\n",
" text = ''.join(x if x.strip() else \"\\n\" for x in text.splitlines(True))\n",
" rules = rules or self.default_rules\n",
" def manipulate(text):\n",
" for key in rules:\n",
" m = getattr(self.rules, key).match(text)\n",
" if m: getattr(self, 'parse_%s' % key)(m); return m\n",
" return False \n",
" while text:\n",
" m = manipulate(text)\n",
" if m: text = text[len(m.group(0)):]\n",
" if not m and text: raise RuntimeError('Infinite loop at: %s' % text)\n",
" if self.tokens: self.tokens[-1]['match'] = m\n",
" return self.tokens"
],
"text/plain": [
" class Lexer(__import__('mistune').BlockLexer):\n",
" def parse(self, text, rules=None):\n",
" text = ''.join(x if x.strip() else \"\\n\" for x in text.splitlines(True))\n",
" rules = rules or self.default_rules\n",
" def manipulate(text):\n",
" for key in rules:\n",
" m = getattr(self.rules, key).match(text)\n",
" if m: getattr(self, 'parse_%s' % key)(m); return m\n",
" return False \n",
" while text:\n",
" m = manipulate(text)\n",
" if m: text = text[len(m.group(0)):]\n",
" if not m and text: raise RuntimeError('Infinite loop at: %s' % text)\n",
" if self.tokens: self.tokens[-1]['match'] = m\n",
" return self.tokens"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
" class Lexer(__import__('mistune').BlockLexer):\n",
" def parse(self, text, rules=None):\n",
" text = ''.join(x if x.strip() else \"\\n\" for x in text.splitlines(True))\n",
" rules = rules or self.default_rules\n",
" def manipulate(text):\n",
" for key in rules:\n",
" m = getattr(self.rules, key).match(text)\n",
" if m: getattr(self, 'parse_%s' % key)(m); return m\n",
" return False \n",
" while text:\n",
" m = manipulate(text)\n",
" if m: text = text[len(m.group(0)):]\n",
" if not m and text: raise RuntimeError('Infinite loop at: %s' % text)\n",
" if self.tokens: self.tokens[-1]['match'] = m\n",
" return self.tokens"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
" def quote(str, prior=\"\", tick='\"\"\"', semicolon=''):\n",
" \"\"\"wrap a block of text in quotes. \"\"\"\n",
" if not str.strip(): return str\n",
" indent, outdent = len(str)-len(str.lstrip()), len(str.rstrip())\n",
" if tick in str or str.endswith(tick[0]): tick = '\"\"\"'\n",
" return str[:indent] + prior + tick + str[indent:outdent] + tick + semicolon + str[outdent:]"
],
"text/plain": [
" def quote(str, prior=\"\", tick='\"\"\"', semicolon=''):\n",
" \"\"\"wrap a block of text in quotes. \"\"\"\n",
" if not str.strip(): return str\n",
" indent, outdent = len(str)-len(str.lstrip()), len(str.rstrip())\n",
" if tick in str or str.endswith(tick[0]): tick = '\"\"\"'\n",
" return str[:indent] + prior + tick + str[indent:outdent] + tick + semicolon + str[outdent:]"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
" def quote(str, prior=\"\", tick='\"\"\"', semicolon=''):\n",
" \"\"\"wrap a block of text in quotes. \"\"\"\n",
" if not str.strip(): return str\n",
" indent, outdent = len(str)-len(str.lstrip()), len(str.rstrip())\n",
" if tick in str or str.endswith(tick[0]): tick = '\"\"\"'\n",
" return str[:indent] + prior + tick + str[indent:outdent] + tick + semicolon + str[outdent:]"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`parse` markdown code into valid python code."
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
" class Parser:\n",
" def parse_code(self, token, *, indent=-1):\n",
" code = token['match'].group().rstrip(); stripped = code.strip()\n",
" if stripped.startswith(('>>>',)) or (not stripped and 'match' in token): return # don't do anything for blank code.\n",
" if code.startswith(('```',)): \n",
" code = ''.join(['\\n'] + code.rstrip('`').splitlines(True)[1:])\n",
" if textwrap.dedent(code) == code: \n",
" code = textwrap.indent(code, ' '*max(indent + bool(block_level)*2, indent))\n",
" return code\n",
" \n",
" def indent_text(self, body, text, code=\"\", *, semicolon='', indent=-1, block_level=0):\n",
" try: tokenized = list(tokenize.tokenize(io.BytesIO(textwrap.dedent(body).encode('utf-8')).readline)) if body else []\n",
" except tokenize.TokenError as exception:\n",
" if exception.args[0] == 'EOF in multi-line string': \n",
" text = textwrap.indent(text, ' '*indent)\n",
" elif body.strip() and body.rstrip()[-1] in '[{(':\n",
" text = textwrap.indent(quote(text), ' '*indent)\n",
" else:\n",
" text = (text.strip() and quote or (lambda *x:x[0]))(text, ' '*indent)\n",
" else: \n",
" while tokenized and not tokenized[-1].string: tokenized.pop()\n",
" this_indent, line = indent, tokenized[-1].line if tokenized else \"\"\n",
" this_indent += len(line) - len(line.lstrip())\n",
" if body.rstrip().endswith(':'): \n",
" for last in code.splitlines() or ['']:\n",
" if last.strip(): break\n",
" this_indent += max(len(last)-len(last.lstrip())-this_indent + block_level*2, indent)\n",
" text = quote(textwrap.indent(text, ' '*this_indent), semicolon=semicolon) \n",
" if code.lstrip().startswith('.'): print(code)\n",
" return body + text + code\n",
" \n",
" def parse(self, object, *, formatted=\"\", indent=-1, block_level=0):\n",
" tokens, attrs = Lexer()(object), set(dir(self))\n",
" while tokens:\n",
" token = tokens.pop(0)\n",
" if token['type'] == 'list_start': block_level += 1\n",
" if token['type'] == 'list_end': block_level -= 1\n",
" if F\"parse_{token['type']}\" in attrs: \n",
" code = getattr(self, F\"parse_{token['type']}\")(token, indent=indent)\n",
" if code is None: continue\n",
" indent = max(len(code) - len(code.lstrip()), 4)\n",
" text, object = re.split(r'\\s*'.join(re.escape(token['match'].group().rstrip()).splitlines(True)), object)\n",
" formatted = self.indent_text(formatted, text, code, indent=indent, block_level=block_level)\n",
" return self.indent_text(formatted, object, indent=indent, semicolon=';')\n",
" \n",
" "
],
"text/plain": [
" class Parser:\n",
" def parse_code(self, token, *, indent=-1):\n",
" code = token['match'].group().rstrip(); stripped = code.strip()\n",
" if stripped.startswith(('>>>',)) or (not stripped and 'match' in token): return # don't do anything for blank code.\n",
" if code.startswith(('```',)): \n",
" code = ''.join(['\\n'] + code.rstrip('`').splitlines(True)[1:])\n",
" if textwrap.dedent(code) == code: \n",
" code = textwrap.indent(code, ' '*max(indent + bool(block_level)*2, indent))\n",
" return code\n",
" \n",
" def indent_text(self, body, text, code=\"\", *, semicolon='', indent=-1, block_level=0):\n",
" try: tokenized = list(tokenize.tokenize(io.BytesIO(textwrap.dedent(body).encode('utf-8')).readline)) if body else []\n",
" except tokenize.TokenError as exception:\n",
" if exception.args[0] == 'EOF in multi-line string': \n",
" text = textwrap.indent(text, ' '*indent)\n",
" elif body.strip() and body.rstrip()[-1] in '[{(':\n",
" text = textwrap.indent(quote(text), ' '*indent)\n",
" else:\n",
" text = (text.strip() and quote or (lambda *x:x[0]))(text, ' '*indent)\n",
" else: \n",
" while tokenized and not tokenized[-1].string: tokenized.pop()\n",
" this_indent, line = indent, tokenized[-1].line if tokenized else \"\"\n",
" this_indent += len(line) - len(line.lstrip())\n",
" if body.rstrip().endswith(':'): \n",
" for last in code.splitlines() or ['']:\n",
" if last.strip(): break\n",
" this_indent += max(len(last)-len(last.lstrip())-this_indent + block_level*2, indent)\n",
" text = quote(textwrap.indent(text, ' '*this_indent), semicolon=semicolon) \n",
" if code.lstrip().startswith('.'): print(code)\n",
" return body + text + code\n",
" \n",
" def parse(self, object, *, formatted=\"\", indent=-1, block_level=0):\n",
" tokens, attrs = Lexer()(object), set(dir(self))\n",
" while tokens:\n",
" token = tokens.pop(0)\n",
" if token['type'] == 'list_start': block_level += 1\n",
" if token['type'] == 'list_end': block_level -= 1\n",
" if F\"parse_{token['type']}\" in attrs: \n",
" code = getattr(self, F\"parse_{token['type']}\")(token, indent=indent)\n",
" if code is None: continue\n",
" indent = max(len(code) - len(code.lstrip()), 4)\n",
" text, object = re.split(r'\\s*'.join(re.escape(token['match'].group().rstrip()).splitlines(True)), object)\n",
" formatted = self.indent_text(formatted, text, code, indent=indent, block_level=block_level)\n",
" return self.indent_text(formatted, object, indent=indent, semicolon=';')\n",
" \n",
" "
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
" class Parser:\n",
" def parse_code(self, token, *, indent=-1):\n",
" code = token['match'].group().rstrip(); stripped = code.strip()\n",
" if stripped.startswith(('>>>',)) or (not stripped and 'match' in token): return # don't do anything for blank code.\n",
" if code.startswith(('```',)): \n",
" code = ''.join(['\\n'] + code.rstrip('`').splitlines(True)[1:])\n",
" if textwrap.dedent(code) == code: \n",
" code = textwrap.indent(code, ' '*max(indent + bool(block_level)*2, indent))\n",
" return code\n",
" \n",
" def indent_text(self, body, text, code=\"\", *, semicolon='', indent=-1, block_level=0):\n",
" try: tokenized = list(tokenize.tokenize(io.BytesIO(textwrap.dedent(body).encode('utf-8')).readline)) if body else []\n",
" except tokenize.TokenError as exception:\n",
" if exception.args[0] == 'EOF in multi-line string': \n",
" text = textwrap.indent(text, ' '*indent)\n",
" elif body.strip() and body.rstrip()[-1] in '[{(':\n",
" text = textwrap.indent(quote(text), ' '*indent)\n",
" else:\n",
" text = (text.strip() and quote or (lambda *x:x[0]))(text, ' '*indent)\n",
" else: \n",
" while tokenized and not tokenized[-1].string: tokenized.pop()\n",
" this_indent, line = indent, tokenized[-1].line if tokenized else \"\"\n",
" this_indent += len(line) - len(line.lstrip())\n",
" if body.rstrip().endswith(':'): \n",
" for last in code.splitlines() or ['']:\n",
" if last.strip(): break\n",
" this_indent += max(len(last)-len(last.lstrip())-this_indent + block_level*2, indent)\n",
" text = quote(textwrap.indent(text, ' '*this_indent), semicolon=semicolon) \n",
" if code.lstrip().startswith('.'): print(code)\n",
" return body + text + code\n",
" \n",
" def parse(self, object, *, formatted=\"\", indent=-1, block_level=0):\n",
" tokens, attrs = Lexer()(object), set(dir(self))\n",
" while tokens:\n",
" token = tokens.pop(0)\n",
" if token['type'] == 'list_start': block_level += 1\n",
" if token['type'] == 'list_end': block_level -= 1\n",
" if F\"parse_{token['type']}\" in attrs: \n",
" code = getattr(self, F\"parse_{token['type']}\")(token, indent=indent)\n",
" if code is None: continue\n",
" indent = max(len(code) - len(code.lstrip()), 4)\n",
" text, object = re.split(r'\\s*'.join(re.escape(token['match'].group().rstrip()).splitlines(True)), object)\n",
" formatted = self.indent_text(formatted, text, code, indent=indent, block_level=block_level)\n",
" return self.indent_text(formatted, object, indent=indent, semicolon=';')\n",
" \n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Another notebook defines how the output should be displayed.\n",
"\n",
"Create `IPython` extensions that can be reused."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
" parser = Parser()\n",
" def cleanup_transform(x): \n",
" global parser\n",
" return textwrap.dedent(parser.parse(''.join(x))).splitlines(True)\n",
"\n",
" def unload_ipython_extension(shell):\n",
" globals()['_transforms'] = globals().get('_transforms', shell.input_transformer_manager.cleanup_transforms)\n",
" global _transforms\n",
" shell.input_transformer_manager.cleanup_transforms = _transforms\n",
" def load_ipython_extension(shell):\n",
" unload_ipython_extension(shell)\n",
" shell.input_transformer_manager.cleanup_transforms = [cleanup_transform]\n",
"\n",
" #__name__ == '__main__' and load_ipython_extension(get_ipython())"
],
"text/plain": [
" parser = Parser()\n",
" def cleanup_transform(x): \n",
" global parser\n",
" return textwrap.dedent(parser.parse(''.join(x))).splitlines(True)\n",
"\n",
" def unload_ipython_extension(shell):\n",
" globals()['_transforms'] = globals().get('_transforms', shell.input_transformer_manager.cleanup_transforms)\n",
" global _transforms\n",
" shell.input_transformer_manager.cleanup_transforms = _transforms\n",
" def load_ipython_extension(shell):\n",
" unload_ipython_extension(shell)\n",
" shell.input_transformer_manager.cleanup_transforms = [cleanup_transform]\n",
"\n",
" #__name__ == '__main__' and load_ipython_extension(get_ipython())"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
" parser = Parser()\n",
" def cleanup_transform(x): \n",
" global parser\n",
" return textwrap.dedent(parser.parse(''.join(x))).splitlines(True)\n",
"\n",
" def unload_ipython_extension(shell):\n",
" globals()['_transforms'] = globals().get('_transforms', shell.input_transformer_manager.cleanup_transforms)\n",
" global _transforms\n",
" shell.input_transformer_manager.cleanup_transforms = _transforms\n",
" def load_ipython_extension(shell):\n",
" unload_ipython_extension(shell)\n",
" shell.input_transformer_manager.cleanup_transforms = [cleanup_transform]\n",
"\n",
" #__name__ == '__main__' and load_ipython_extension(get_ipython())"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.3"
}
},
"nbformat": 4,
"nbformat_minor": 4
}
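
Taken together, the notebook above round-trips Markdown-first source into executable Python: Lexer finds the block tokens, quote wraps narrative runs in triple-quoted strings, and Parser.parse re-indents everything so prose and code share one consistent block structure. A minimal smoke test of those pieces, assuming the cells above have been run inside an IPython session and that mistune 0.8.x (the release whose BlockLexer the Lexer subclasses) is installed; the sample text below is purely illustrative:

    # Hypothetical smoke test for the Parser defined in the notebook above.
    import textwrap

    sample = (
        "Narrative prose should come back wrapped in a quoted string.\n"
        "\n"
        "    answer = 6 * 7  # an indented block should stay executable\n"
    )

    compiled = Parser().parse(sample)
    print(compiled)  # expect: the prose quoted, the code left in place
    # If the parse produced invalid Python, this raises a SyntaxError.
    compile(textwrap.dedent(compiled), '<markdown>', 'exec')

    # Installing the transform routes every subsequent cell through it:
    shell = get_ipython()              # only defined inside IPython/Jupyter
    load_ipython_extension(shell)      # swap in cleanup_transform
    unload_ipython_extension(shell)    # restore the saved cleanup transforms

A companion module in the gist then assembles the full pidgin experience from a family of sibling notebooks: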
with __import__('importnb').Notebook():
    try: from . import __doctest_post_run_cell, __interactive_markdown_cells, __string_expression_transformer, __interactive_jinja_templates, __xonsh_compiler, __emojis__, __return_yield_display, __pidgin_loader, __functional_programming_toolz, __default_markdown_display_for_strings
    except ImportError: import __doctest_post_run_cell, __interactive_markdown_cells, __string_expression_transformer, __interactive_jinja_templates, __xonsh_compiler, __emojis__, __return_yield_display, __pidgin_loader, __functional_programming_toolz, __default_markdown_display_for_strings
PidginLoader = __pidgin_loader.PidginLoader
def unload_ipython_extension(shell): [x.unload_ipython_extension(shell) for x in (__doctest_post_run_cell, __interactive_markdown_cells, __string_expression_transformer, __interactive_jinja_templates, __xonsh_compiler, __emojis__, __return_yield_display, __default_markdown_display_for_strings)]
def load_ipython_extension(shell): [x.load_ipython_extension(shell) for x in (__doctest_post_run_cell, __interactive_markdown_cells, __string_expression_transformer, __interactive_jinja_templates, __xonsh_compiler, __emojis__, __return_yield_display, __default_markdown_display_for_strings)]
import schematypes
from tonyfast.poser import *
shell = get_ipython()
shell.run_cell("\n %reload_ext tonyfast.literacy")
shell.enable_html_pager = True
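
A sketch of how that composite extension would be toggled from a live session; tonyfast.pidgin is a stand-in for whatever module path the file above is actually installed under, which the gist does not confirm:

    # Hypothetical usage; substitute the real module path for 'tonyfast.pidgin'.
    ip = get_ipython()                                   # requires IPython/Jupyter
    ip.run_line_magic('load_ext', 'tonyfast.pidgin')     # runs load_ipython_extension
    ip.run_line_magic('unload_ext', 'tonyfast.pidgin')   # runs unload_ipython_extension

The gist also carries what appears to be a requirements file pinning the packages the notebooks lean on: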
toolz
notebook
importnb
htmlmin
nbconvert
IPython
xonsh
graphviz
emoji
stringcase
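
Saving that list as requirements.txt and installing it with pip should approximate the environment the notebook reports (Python 3.7.3, per its metadata); exact versions are not pinned in the gist.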