Let's close the ultra-small library cycle with some awesome array-based templating. 323 bytes minified.
Just download the minified version here or include it in your code:
# Copyright (c) 2009-2010 Denis Bilenko. See LICENSE for details. | |
"""UDP/SSL server""" | |
import sys | |
import errno | |
import traceback | |
from gevent import socket | |
from gevent import core | |
from gevent.baseserver import BaseServer | |
#!/usr/bin/env python | |
from __future__ import print_function | |
from keras.models import Sequential | |
from keras.layers import TimeDistributed | |
from keras.layers.core import Dense, Activation, Dropout, RepeatVector, TimeDistributedDense | |
from keras.layers.recurrent import LSTM | |
from keras.utils.data_utils import get_file | |
import numpy as np | |
import random,string | |
import sys |
Let's close the ultra-small library cycle with some awesome array-based templating. 323 bytes minified.
Just download the minified version here or include it in your code:
''' | |
matplotlib must be developer release for voxel support | |
install instructions: | |
https://matplotlib.org/devdocs/users/installing.html | |
''' | |
import matplotlib.pyplot as plt | |
import numpy as np | |
from mpl_toolkits.mplot3d import Axes3D | |
import matplotlib.animation as manimation |
# Evolution Strategies with Keras
# Based on: https://blog.openai.com/evolution-strategies/
# Implementation by: Nicholas Samoray
#
# README
# Meant to be run on a single machine
# APPLY_BIAS is currently not working; keep it set to False
# Solves CartPole as-is in about 50 episodes
# Solves BipedalWalker-v2 in about 1000 episodes
#!/usr/bin/env python | |
""" | |
Some Hessian codes | |
""" | |
import numpy as np | |
from scipy.optimize import approx_fprime | |
def hessian ( x0, epsilon=1.e-5, linear_approx=False, *args ): | |
""" |
//////////////////////////////////////////////////////////////// | |
// | |
// Defer statement | |
// - Akin to D's SCOPE_EXIT or similar to Go's defer but scope-based | |
// | |
//////////////////////////////////////////////////////////////// | |
#if defined(__cplusplus) | |
extern "C++" { | |
// NOTE(bill): Stupid fucking templates | |
template <typename T> struct gbRemove_Reference { typedef T Type; }; |
from socket import socket, SO_REUSEADDR, SOL_SOCKET | |
from asyncio import Task, coroutine, get_event_loop | |
class Peer(object): | |
def __init__(self, server, sock, name): | |
self.loop = server.loop | |
self.name = name | |
self._sock = sock | |
self._server = server | |
Task(self._peer_handler()) |
As of version 3.2, Python includes the very promising concurrent.futures
module, with elegant context managers for running tasks concurrently. Thanks to its simple and consistent interface, you can use both threads and processes with minimal effort.
For most CPU bound tasks - anything that is heavy number crunching - you want your program to use all the CPUs in your PC. The simplest way to get a CPU bound task to run in parallel is to use the ProcessPoolExecutor, which will create enough sub-processes to keep all your CPUs busy.
We use the context manager thusly:
with concurrent.futures.ProcessPoolExecutor() as executor:
The following gist is an extract of the article Flask-SQLAlchemy Caching. It provides automated caching of simple queries and event-driven invalidation of cached relations, among other features.
# pulling one User object
user = User.query.get(1)