# Christian Jauvin <cjauvin@gmail.com>
# http://cjauvin.github.com
#
# Solutions for:
# https://www.readyforzero.com/challenge
# ajobwelldone+b26e9@readyforzero.com
# Problem 1
s = "en3pG3+nz+A2acXKrsyDouhViP9EDQS4JQK6uJqM3rBjKEBKC3yc=AA1=LUQqRPHvQ4dopgkbb/axClP3smzVcaTkRsCqHSG5aKFQ2TbOae0t5r4nWrCVesGK1Z3yEq+dClrDwXXOiAMyW09WdCS+CaKcfu=6kv9dUFBcS4KsUIgwMiXimoBpJZSWlBzILVf4zVA=7GjRP8RXn6uKjbjAPkpFEs/mYJpeOpEnhQfPhjoscgjfL5/SQsU6+jaAf5pg9MQzZdQAEJt7Jm1541fEmnumpjmJMd/MTJ5vzBttBBA7b5rbjDX0nHdTWn8C5suYKfNyYzc6x8S6FIepoEBsMS2mKhx5BRH5jSBrYRem4iQgYARzGnCFot3jPhp3cHj7qjXBWfZZASz8YJqi2d+r393AmdGm1L9NfU2f=FJprbLwJpuE7uT7xAlQA3Ry8aHRNgNkffP29Iqb2DSoQ0PK+9LX0t37HIAhI5zvoP6b4J7yQZEQDgeNlnPQMvSjw9pLWAxQ1VUY+NMU5BLZ2Bxuma1cIsHxcx4PwHcg0u1HYPJAWM2WK=xhJP5aQSc6oNMQK4s2=6guQRVFll6rvWkXTebrdsws7m/Kpa29spUFl8XzFx0ondEMCF3byAyWj875wAI3Hn8ZY92ddTAKj0s+a4X7qSti2lA0GzePHjBjMCD5g9kZYLtB94kkbVZ6eCle/xtto4LHH8GElc5YoUi=mk3nmQ5iOL1zMWfDyRUMLq+HCXbQL9NTejNa/yTdL3sayJOlMW1T7/Jmaz1FMbfBRFzruHeMT41=Zu3nYZJ3nIP22qKrFzNkt/24RuQ+7IMVCI2
import psycopg2, psycopg2.extras
import little_pger as db
from flask import *

application = Flask('autocomplete-tribute')

@application.route('/autocomplete', methods=['GET'])
def autocomplete():
    conn = psycopg2.connect("dbname=autocomplete-tribute user=christian",
                            connection_factory=psycopg2.extras.RealDictConnection)
import time
from math import sqrt
from joblib import Parallel, delayed

# Results obtained on my dual-core Thinkpad laptop by using this modification:
# https://github.com/cjauvin/joblib/compare/parallel_job_batch
start = time.time()
Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in xrange(50000))
print time.time() - start  # 4.2 secs
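
# For comparison, a hedged sketch of the manual-batching workaround with
# stock joblib (the chunk size of 1000 and the sqrt_batch helper are my own
# illustration, not taken from the parallel_job_batch branch above):
def sqrt_batch(values):
    # a whole chunk is processed inside a single dispatched job, so joblib's
    # per-job dispatch overhead is paid once per chunk instead of once per item
    return [sqrt(v ** 2) for v in values]

chunks = [range(i, min(i + 1000, 50000)) for i in xrange(0, 50000, 1000)]
start = time.time()
results = Parallel(n_jobs=2)(delayed(sqrt_batch)(c) for c in chunks)
print time.time() - start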
# Simulator for the simple Boltzmann machine of Coursera NN Lecture 11e
# Christian Jauvin - cjauvin@gmail.com
from collections import defaultdict
import numpy as np
# weights
w_v1_h1 = 2
w_h1_h2 = -1
w_h2_v2 = 1
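
# The rest of the simulator is not shown in this excerpt; below is a hedged
# sketch of a brute-force version over these three weights, assuming binary
# 0/1 units and the standard energy
#   E = -(w_v1_h1*v1*h1 + w_h1_h2*h1*h2 + w_h2_v2*h2*v2)
# (the formulation is the usual one, not necessarily the original gist's code):
from itertools import product

probs = {}
for v1, h1, h2, v2 in product([0, 1], repeat=4):
    # unnormalized Boltzmann probability exp(-E) of this joint configuration
    energy = -(w_v1_h1 * v1 * h1 + w_h1_h2 * h1 * h2 + w_h2_v2 * h2 * v2)
    probs[(v1, h1, h2, v2)] = np.exp(-energy)
Z = sum(probs.values())  # partition function
for config in sorted(probs):
    print config, probs[config] / Z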
import time
from sklearn.feature_extraction.text import CountVectorizer

# brown20x.txt is the Brown corpus concatenated 20x to itself (~1M lines)
# Results obtained on a 24-core Linux machine
start = time.time()
vect = CountVectorizer()
# note: n_jobs is not part of the stock CountVectorizer.fit_transform API;
# this presumably targets a patched scikit-learn, in the spirit of the
# joblib modification referenced above
vect.fit_transform(open('brown20x.txt'), n_jobs=1)
print time.time() - start  # 307 seconds
Ext.onReady(function() {
    Ext.define('Ubuntu', {
        extend: 'Ext.data.Model',
        fields: [{
            name: 'release',
            convert: function(v, rec) {
                return Ext.String.format('{0} {1} - {2}',
                                         rec.raw.adjective,
                                         rec.raw.animal,
                                         rec.raw.version);
            }
        }]
    });
});
where = {}
where['adjective'] = 'Lucid'
# where adjective = 'Lucid'
where['adjective'] = ('Warty', 'Dapper')
# where adjective in ('Warty', 'Dapper')
# For sequence values, the rules are: a tuple translates
# to the 'in' operator (as above), a list to a PG array
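
# A hedged sketch of how such a where dict could be passed to little_pger
# (the select(cursor, table, where=...) call pattern is an assumption on my
# part; check the library for the exact signature):
import psycopg2
import little_pger as db

conn = psycopg2.connect("dbname=autocomplete-tribute user=christian")
cur = conn.cursor()
rows = db.select(cur, 'ubuntu', where={'adjective': ('Warty', 'Dapper')})
# expected to translate to: select * from ubuntu where adjective in ('Warty', 'Dapper')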
select * from ubuntu where
adjective || animal || version ilike E'%lynx%' and
adjective || animal || version ilike E'%04%' and
adjective || animal || version ilike E'%lucid%'
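
# A hedged sketch of how a query like the one above might be built and
# parameterized from a user's search string with psycopg2 (the whitespace
# tokenization and the build_autocomplete_query helper are my own illustration):
import psycopg2

def build_autocomplete_query(search):
    tokens = search.split()
    clauses = " and ".join(
        ["adjective || animal || version ilike %s"] * len(tokens))
    params = ['%%%s%%' % t for t in tokens]  # wrap each token in % wildcards
    return "select * from ubuntu where " + clauses, params

conn = psycopg2.connect("dbname=autocomplete-tribute user=christian")
cur = conn.cursor()
sql, params = build_autocomplete_query('lynx 04 lucid')
cur.execute(sql, params)
print cur.fetchall()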
create table ubuntu (
    id serial primary key,
    adjective text,
    animal text,
    version text
);
insert into ubuntu (adjective, animal, version)
values ('Warty', 'Warthog', '4.10');
insert into ubuntu (adjective, animal, version)