Skip to content

Instantly share code, notes, and snippets.

@aseyboldt
Created January 12, 2017 19:44
Show Gist options
  • Save aseyboldt/777fc6e73f6444da14da03818352c96f to your computer and use it in GitHub Desktop.
Save aseyboldt/777fc6e73f6444da14da03818352c96f to your computer and use it in GitHub Desktop.
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Simplification of Sum(Elemwise)\n",
"\n",
"Compute the sum of an elementwise operation on two arrays.\n",
"Ideally, this should not store the results of the elementwise\n",
"operations, but perform the reduction on the fly (in parallel\n",
"if possible)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"import numpy as np\n",
"import numba\n",
"import theano.tensor as tt\n",
"import theano"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"collapsed": false
},
"outputs": [
{
"data": {
"text/plain": [
"'0.8.2.dev-RELEASE'"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"theano.version.full_version"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"x = tt.vector()\n",
"y = tt.vector()\n",
"\n",
"x.tag.test_value = np.array([0])\n",
"y.tag.test_value = np.array([0])\n",
"\n",
"z = (x + y).sum()\n",
"func_tt = theano.function([x, y], z, profile=True)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sum{acc_dtype=float64} [id A] '' 1\n",
" |Elemwise{add,no_inplace} [id B] '' 0\n",
" |<TensorType(float64, vector)> [id C]\n",
" |<TensorType(float64, vector)> [id D]\n"
]
}
],
"source": [
"theano.printing.debugprint(func_tt)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"# This does what we want\n",
"@numba.jit\n",
"def func_numba(x, y):\n",
" result = 0.0\n",
" for a, b in zip(x, y):\n",
" result += a + b\n",
" return result\n",
"\n",
"# This is more like what theano does\n",
"@numba.jit\n",
"def func_numba2(x, y, out):\n",
" n = len(x)\n",
" assert n == len(y) and n == len(out)\n",
" for i in range(n):\n",
" out[i] = x[i] + y[i]\n",
" return out.sum()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"N = 100000\n",
"a = np.random.random(N)\n",
"b = np.random.random(N)\n",
"out = np.zeros_like(a)\n",
"result = a.sum() + b.sum()"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"assert np.allclose(func_tt(a, b), result)\n",
"assert np.allclose(func_numba(a, b), result)\n",
"assert np.allclose(func_numba2(a, b, out), result)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10000 loops, best of 3: 180 µs per loop\n"
]
}
],
"source": [
"%timeit func_tt(a, b)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10000 loops, best of 3: 98.7 µs per loop\n"
]
}
],
"source": [
"%timeit func_numba(a, b)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10000 loops, best of 3: 189 µs per loop\n"
]
}
],
"source": [
"%timeit func_numba2(a, b, out)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"10000 loops, best of 3: 88.3 µs per loop\n"
]
}
],
"source": [
"%timeit a.sum() + b.sum()"
]
},
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"collapsed": false,
"scrolled": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Function profiling\n",
"==================\n",
" Message: <ipython-input-3-b61a3cd1fcf4>:8\n",
" Time in 41112 calls to Function.__call__: 7.027448e+00s\n",
" Time in Function.fn.__call__: 6.491591e+00s (92.375%)\n",
" Time in thunks: 6.410338e+00s (91.219%)\n",
" Total compile time: 3.813583e+00s\n",
" Number of Apply nodes: 2\n",
" Theano Optimizer time: 1.973689e-01s\n",
" Theano validate time: 0.000000e+00s\n",
" Theano Linker time (includes C, CUDA code generation/compiling): 1.068933e-01s\n",
" Import time 3.321648e-03s\n",
"\n",
"Time in all call to theano.grad() 0.000000e+00s\n",
"Time since theano import 27.517s\n",
"Class\n",
"---\n",
"<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>\n",
" 69.8% 69.8% 4.476s 1.09e-04s C 41112 1 theano.tensor.elemwise.Sum\n",
" 30.2% 100.0% 1.934s 4.71e-05s C 41112 1 theano.tensor.elemwise.Elemwise\n",
" ... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Ops\n",
"---\n",
"<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>\n",
" 69.8% 69.8% 4.476s 1.09e-04s C 41112 1 Sum{acc_dtype=float64}\n",
" 30.2% 100.0% 1.934s 4.71e-05s C 41112 1 Elemwise{add,no_inplace}\n",
" ... (remaining 0 Ops account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Apply\n",
"------\n",
"<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>\n",
" 69.8% 69.8% 4.476s 1.09e-04s 41112 1 Sum{acc_dtype=float64}(Elemwise{add,no_inplace}.0)\n",
" 30.2% 100.0% 1.934s 4.71e-05s 41112 0 Elemwise{add,no_inplace}(<TensorType(float64, vector)>, <TensorType(float64, vector)>)\n",
" ... (remaining 0 Apply instances account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Here are tips to potentially make your code run faster\n",
" (if you think of new ones, suggest them on the mailing list).\n",
" Test them first, as they are not guaranteed to always provide a speedup.\n",
" - Try the Theano flag floatX=float32\n"
]
}
],
"source": [
"func_tt.profile.summary()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# A simple example with pymc3"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"import pymc3"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"data = np.random.randn(100000)\n",
"\n",
"model = pymc3.Model()\n",
"with model:\n",
" mu = pymc3.Flat(\"mu\")\n",
" sd = pymc3.HalfCauchy(\"sd\", beta=1)\n",
" pymc3.Normal(\"measure\", mu=mu, sd=sd, observed=data)"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 5000/5000 [00:53<00:00, 93.07it/s] \n"
]
}
],
"source": [
"with model:\n",
" step = pymc3.NUTS(profile=True)\n",
" trace = pymc3.sample(5000, step=step)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"`Sum` eats 60% of the runtime on a machine with two cores. I've seen 40% in real world models.\n",
"\n",
"Less important, but easier to fix: `Sum(Alloc(x, n))` is not simplified to `Mul(n, x)`"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"collapsed": false,
"scrolled": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Function profiling\n",
"==================\n",
" Message: /home/adr/git/pymc3/pymc3/step_methods/nuts.py:241\n",
" Time in 30278 calls to Function.__call__: 4.438121e+01s\n",
" Time in Function.fn.__call__: 4.358158e+01s (98.198%)\n",
" Time in thunks: 4.176215e+01s (94.099%)\n",
" Total compile time: 9.875805e-01s\n",
" Number of Apply nodes: 84\n",
" Theano Optimizer time: 8.555846e-01s\n",
" Theano validate time: 1.924634e-02s\n",
" Theano Linker time (includes C, CUDA code generation/compiling): 5.489039e-02s\n",
" Import time 0.000000e+00s\n",
"\n",
"Time in all call to theano.grad() 3.258111e+00s\n",
"Time since theano import 550.632s\n",
"Class\n",
"---\n",
"<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>\n",
" 57.9% 57.9% 24.159s 1.14e-04s C 211946 7 theano.tensor.elemwise.Sum\n",
" 33.4% 91.3% 13.955s 1.40e-05s C 999174 33 theano.tensor.elemwise.Elemwise\n",
" 6.5% 97.8% 2.718s 4.49e-05s C 60556 2 theano.tensor.basic.Alloc\n",
" 0.6% 98.4% 0.253s 4.18e-06s C 60556 2 theano.tensor.basic.Join\n",
" 0.5% 98.9% 0.201s 5.53e-07s C 363336 12 theano.tensor.basic.Reshape\n",
" 0.3% 99.2% 0.140s 5.12e-07s C 272502 9 theano.tensor.elemwise.DimShuffle\n",
" 0.2% 99.4% 0.096s 7.92e-07s C 121112 4 theano.tensor.subtensor.Subtensor\n",
" 0.2% 99.6% 0.070s 2.90e-07s C 242224 8 theano.compile.ops.Rebroadcast\n",
" 0.2% 99.7% 0.065s 2.16e-06s C 30278 1 theano.tensor.blas_c.CGemv\n",
" 0.1% 99.8% 0.029s 4.83e-07s C 60556 2 theano.tensor.basic.ScalarFromTensor\n",
" 0.1% 99.9% 0.028s 4.54e-07s C 60556 2 theano.compile.ops.ViewOp\n",
" 0.1% 99.9% 0.027s 8.84e-07s C 30278 1 theano.tensor.basic.AllocEmpty\n",
" 0.1% 100.0% 0.021s 7.02e-07s C 30278 1 theano.compile.ops.Shape_i\n",
" ... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Ops\n",
"---\n",
"<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>\n",
" 57.9% 57.9% 24.159s 1.14e-04s C 211946 7 Sum{acc_dtype=float64}\n",
" 7.3% 65.2% 3.062s 5.06e-05s C 60556 2 Elemwise{sub,no_inplace}\n",
" 7.2% 72.4% 2.995s 4.95e-05s C 60556 2 Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}[(0, 2)]\n",
" 6.5% 78.9% 2.718s 4.49e-05s C 60556 2 Alloc\n",
" 5.2% 84.1% 2.192s 7.24e-05s C 30278 1 Elemwise{Composite{Switch(i0, (i1 * ((-(i2 * i3)) + i4)), i5)}}\n",
" 4.4% 88.5% 1.822s 6.02e-05s C 30278 1 Elemwise{Composite{Switch(i0, (i1 * sqr(i2)), i3)}}\n",
" 4.0% 92.5% 1.665s 5.50e-05s C 30278 1 Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}\n",
" 2.8% 95.3% 1.177s 3.89e-05s C 30278 1 Elemwise{Sqr}[(0, 0)]\n",
" 0.6% 95.9% 0.253s 4.18e-06s C 60556 2 Join\n",
" 0.3% 96.2% 0.140s 5.79e-07s C 242224 8 Reshape{1}\n",
" 0.3% 96.5% 0.113s 1.86e-06s C 60556 2 Elemwise{gt,no_inplace}\n",
" 0.2% 96.7% 0.096s 7.92e-07s C 121112 4 Subtensor{int64}\n",
" 0.2% 96.9% 0.095s 4.47e-07s C 211946 7 InplaceDimShuffle{x}\n",
" 0.2% 97.2% 0.089s 2.95e-06s C 30278 1 Elemwise{Composite{log((i0 * i1))}}\n",
" 0.2% 97.3% 0.075s 2.49e-06s C 30278 1 Elemwise{Composite{((-((Switch(i0, (i1 - log1p(i2)), i3) + log(i4) + i5) - (i6 * i7))) - i8)}}[(0, 2)]\n",
" 0.2% 97.5% 0.075s 1.23e-06s C 60556 2 Elemwise{Switch}\n",
" 0.2% 97.7% 0.074s 2.43e-06s C 30278 1 Elemwise{Composite{(i0 - (i1 * i2 * i3))}}[(0, 3)]\n",
" 0.2% 97.9% 0.070s 2.32e-06s C 30278 1 Elemwise{Composite{(i0 - (i1 * i2 * i3))}}[(0, 0)]\n",
" 0.2% 98.0% 0.065s 2.16e-06s C 30278 1 CGemv{inplace}\n",
" 0.2% 98.2% 0.065s 2.14e-06s C 30278 1 Elemwise{Composite{(i0 + (i1 * i2 * i3))}}\n",
" ... (remaining 22 Ops account for 1.82%(0.76s) of the runtime)\n",
"\n",
"Apply\n",
"------\n",
"<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>\n",
" 8.5% 8.5% 3.549s 1.17e-04s 30278 69 Sum{acc_dtype=float64}(Elemwise{Composite{Switch(i0, (i1 * ((-(i2 * i3)) + i4)), i5)}}.0)\n",
" 8.3% 16.8% 3.480s 1.15e-04s 30278 26 Sum{acc_dtype=float64}(Elemwise{Composite{Switch(i0, (i1 * sqr(i2)), i3)}}.0)\n",
" 8.3% 25.1% 3.455s 1.14e-04s 30278 62 Sum{acc_dtype=float64}(Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}.0)\n",
" 8.2% 33.3% 3.441s 1.14e-04s 30278 71 Sum{acc_dtype=float64}(Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}[(0, 2)].0)\n",
" 8.2% 41.6% 3.433s 1.13e-04s 30278 29 Sum{acc_dtype=float64}(Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}[(0, 2)].0)\n",
" 8.2% 49.7% 3.410s 1.13e-04s 30278 30 Sum{acc_dtype=float64}(Alloc.0)\n",
" 8.1% 57.9% 3.391s 1.12e-04s 30278 68 Sum{acc_dtype=float64}(Alloc.0)\n",
" 5.2% 63.1% 2.192s 7.24e-05s 30278 64 Elemwise{Composite{Switch(i0, (i1 * ((-(i2 * i3)) + i4)), i5)}}(Elemwise{gt,no_inplace}.0, TensorConstant{(1,) of 0.5}, InplaceDimShuffle{x}.0, Elemwise{Sqr}[(0, 0)].0, Elemwise{Composite{log((i0 * i1))}}.0, TensorConstant{(1,) of -inf})\n",
" 4.4% 67.5% 1.822s 6.02e-05s 30278 22 Elemwise{Composite{Switch(i0, (i1 * sqr(i2)), i3)}}(Elemwise{gt,no_inplace}.0, TensorConstant{(1,) of 0.5}, Elemwise{sub,no_inplace}.0, TensorConstant{(1,) of 0})\n",
" 4.0% 71.5% 1.666s 5.50e-05s 30278 15 Elemwise{sub,no_inplace}(TensorConstant{[-0.414223...10634007]}, InplaceDimShuffle{x}.0)\n",
" 4.0% 75.4% 1.665s 5.50e-05s 30278 58 Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}(Elemwise{gt,no_inplace}.0, InplaceDimShuffle{x}.0, Elemwise{sub,no_inplace}.0, TensorConstant{(1,) of 0})\n",
" 3.6% 79.0% 1.506s 4.97e-05s 30278 25 Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}[(0, 2)](Elemwise{gt,no_inplace}.0, InplaceDimShuffle{x}.0, Elemwise{sub,no_inplace}.0, TensorConstant{(1,) of 0})\n",
" 3.6% 82.6% 1.489s 4.92e-05s 30278 66 Elemwise{Composite{Switch(i0, (i1 * i2), i3)}}[(0, 2)](Elemwise{gt,no_inplace}.0, TensorConstant{(1,) of 0.5}, Elemwise{Sqr}[(0, 0)].0, TensorConstant{(1,) of 0})\n",
" 3.4% 86.0% 1.411s 4.66e-05s 30278 27 Alloc(Elemwise{Switch}.0, TensorConstant{100000})\n",
" 3.3% 89.3% 1.396s 4.61e-05s 30278 49 Elemwise{sub,no_inplace}(TensorConstant{[-0.414223...10634007]}, InplaceDimShuffle{x}.0)\n",
" 3.1% 92.5% 1.307s 4.32e-05s 30278 63 Alloc(Elemwise{Switch}.0, TensorConstant{100000})\n",
" 2.8% 95.3% 1.177s 3.89e-05s 30278 60 Elemwise{Sqr}[(0, 0)](Elemwise{sub,no_inplace}.0)\n",
" 0.3% 95.6% 0.128s 4.22e-06s 30278 77 Join(TensorConstant{0}, Rebroadcast{1}.0, Rebroadcast{1}.0)\n",
" 0.3% 95.9% 0.125s 4.13e-06s 30278 37 Join(TensorConstant{0}, Rebroadcast{1}.0, Rebroadcast{1}.0)\n",
" 0.2% 96.1% 0.089s 2.95e-06s 30278 57 Elemwise{Composite{log((i0 * i1))}}(TensorConstant{(1,) of 0...9154943092}, InplaceDimShuffle{x}.0)\n",
" ... (remaining 64 Apply instances account for 3.90%(1.63s) of the runtime)\n",
"\n",
"Here are tips to potentially make your code run faster\n",
" (if you think of new ones, suggest them on the mailing list).\n",
" Test them first, as they are not guaranteed to always provide a speedup.\n",
" - Try the Theano flag floatX=float32\n",
" - Try installing amdlibm and set the Theano flag lib.amdlibm=True. This speeds up only some Elemwise operation.\n"
]
}
],
"source": [
"step.leapfrog1_dE.profile.summary()"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Minimal example for `Sum(Alloc)`"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"collapsed": false
},
"outputs": [],
"source": [
"n = tt.iscalar()\n",
"x = tt.scalar()\n",
"\n",
"n.tag.test_value = np.array(1, dtype='int32')\n",
"x.tag.test_value = np.array(0)\n",
"\n",
"y = tt.alloc(x, n)\n",
"\n",
"# Compute n * x in a roundabout way\n",
"z = y.sum()\n",
"\n",
"f = theano.function([n, x], z, profile=True)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Sum{acc_dtype=float64} [id A] '' 1\n",
" |Alloc [id B] '' 0\n",
" |<TensorType(float64, scalar)> [id C]\n",
" |<TensorType(int32, scalar)> [id D]\n"
]
}
],
"source": [
"theano.printing.debugprint(f)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"collapsed": true
},
"outputs": [],
"source": [
"assert f(100, 5) == 500"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"1000 loops, best of 3: 1.8 ms per loop\n"
]
}
],
"source": [
"%timeit f(1000000, 5)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"collapsed": false
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Function profiling\n",
"==================\n",
" Message: <ipython-input-13-fa535a4b87cb>:12\n",
" Time in 4112 calls to Function.__call__: 7.599000e+00s\n",
" Time in Function.fn.__call__: 7.116978e+00s (93.657%)\n",
" Time in thunks: 7.069806e+00s (93.036%)\n",
" Total compile time: 5.121684e-02s\n",
" Number of Apply nodes: 2\n",
" Theano Optimizer time: 6.103516e-03s\n",
" Theano validate time: 0.000000e+00s\n",
" Theano Linker time (includes C, CUDA code generation/compiling): 1.172066e-03s\n",
" Import time 0.000000e+00s\n",
"\n",
"Time in all call to theano.grad() 0.000000e+00s\n",
"Time since theano import 35.332s\n",
"Class\n",
"---\n",
"<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Class name>\n",
" 62.1% 62.1% 4.391s 1.07e-03s C 4112 1 theano.tensor.elemwise.Sum\n",
" 37.9% 100.0% 2.679s 6.51e-04s C 4112 1 theano.tensor.basic.Alloc\n",
" ... (remaining 0 Classes account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Ops\n",
"---\n",
"<% time> <sum %> <apply time> <time per call> <type> <#call> <#apply> <Op name>\n",
" 62.1% 62.1% 4.391s 1.07e-03s C 4112 1 Sum{acc_dtype=float64}\n",
" 37.9% 100.0% 2.679s 6.51e-04s C 4112 1 Alloc\n",
" ... (remaining 0 Ops account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Apply\n",
"------\n",
"<% time> <sum %> <apply time> <time per call> <#call> <id> <Apply name>\n",
" 62.1% 62.1% 4.391s 1.07e-03s 4112 1 Sum{acc_dtype=float64}(Alloc.0)\n",
" 37.9% 100.0% 2.679s 6.51e-04s 4112 0 Alloc(<TensorType(float64, scalar)>, <TensorType(int32, scalar)>)\n",
" ... (remaining 0 Apply instances account for 0.00%(0.00s) of the runtime)\n",
"\n",
"Here are tips to potentially make your code run faster\n",
" (if you think of new ones, suggest them on the mailing list).\n",
" Test them first, as they are not guaranteed to always provide a speedup.\n",
" - Try the Theano flag floatX=float32\n"
]
}
],
"source": [
"f.profile.summary()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.5.2"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment