Attention: this list has moved to
https://github.com/dypsilon/frontend-dev-bookmarks
This page is no longer maintained; please update your bookmarks.
| /* Update interpreter state based on supplied configuration settings | |
| * | |
| * After calling this function, most of the restrictions on the interpreter | |
| * are lifted. The only remaining incomplete settings are those related | |
| * to the main module (sys.argv[0], __main__ metadata) | |
| * | |
| * Calling this when the interpreter is not initializing, is already | |
| * initialized or without a valid current thread state is a fatal error. | |
| * Other errors should be reported as normal Python exceptions with a | |
| * non-zero return code. |
| void | |
| Py_InitializeEx(int install_sigs) | |
| { | |
| PyInterpreterState *interp; | |
| PyThreadState *tstate; | |
| PyObject *bimod, *sysmod; | |
| char *p; | |
| char *icodeset = NULL; /* On Windows, input codeset may theoretically | |
| differ from output codeset. */ | |
| char *codeset = NULL; |
| static PyObject * | |
| run_mod(mod_ty mod, const char *filename, PyObject *globals, PyObject *locals, | |
| PyCompilerFlags *flags, PyArena *arena) | |
| { | |
| PyCodeObject *co; | |
| PyObject *v; | |
| co = PyAST_Compile(mod, filename, flags, arena); | |
| if (co == NULL) | |
| return NULL; | |
| v = PyEval_EvalCode(co, globals, locals); |
| static PyObject * | |
| run_mod(mod_ty mod, PyObject *filename, PyObject *globals, PyObject *locals, | |
| PyCompilerFlags *flags, PyArena *arena) | |
| { | |
| PyCodeObject *co; | |
| PyObject *v; | |
| co = PyAST_CompileObject(mod, filename, flags, -1, arena); | |
| if (co == NULL) | |
| return NULL; | |
| v = PyEval_EvalCode((PyObject*)co, globals, locals); |
| """ | |
| Benchmark threading module. | |
| """ | |
| # FIXME : notify hangs | |
| import perf | |
| from six.moves import xrange | |
| import threading |
| """ | |
| Benchmark concurrency. | |
| """ | |
| import perf | |
| from six.moves import xrange | |
| import threading | |
| import multiprocessing | |
| def add_cmdline_args(cmd, args): |
| The above script tries to benchmark "concurrency" implemented using threading and multiprocessing. Actually, "threads" in CPython are restricted by the "GIL", | |
| so they are not truly concurrent. On the other hand, the "multiprocessing" module creates entirely separate processes, but there is a substantial cost involved in | |
| spawning a whole new process. | |
| So there is a trade-off involved, which becomes evident as we increase the "CRUNCH_NO" variable. | |
| The benchmark actually tries to compare the same phenomenon in py2 and py3, and py2 looks faster here. | |
| I'm adding two graphs comparing the timings for py2 and py3. |
| import perf | |
| from six.moves import xrange | |
def add_cmdline_args(cmd, args):
    """Append the selected benchmark name to the worker command line.

    Does nothing when no benchmark was selected (``args.benchmark`` is
    falsy); otherwise mutates ``cmd`` in place.
    """
    benchmark = args.benchmark
    if benchmark:
        cmd.append(benchmark)
| CRUNCH_NO=10000000 | |
| def bench_number_crunching(loops): | |
| range_it = xrange(loops) | |
| t0 = perf.perf_counter() |
| """ | |
| Benchmark zlib module. | |
| """ | |
| import perf | |
| from six.moves import xrange | |
| import zlib | |
| import binascii |