View gist:c264863573a6bfbf7605
loadArticle = (res, req) ->
  article = yield findArticleById(...)
  return res.notFound() unless article?
  res.ok(article)

findArticleById = (id) ->
  conn = yield conn.Open()  # pseudocode: conn stands in for whatever hands out a DB connection
  row = yield conn.QueryAsync("....", id)
  new Article(row)
View gist:3fbe1aa749b1e58c485e
func LoadArticle(res http.ResponseWriter, req *http.Request) {
	article, err := FindArticleById(...)
	if err != nil {
		handleError(res, err)
		return
	}
	if article == nil {
		handleNotFound(res, err)
		return
	}
	// respond with the article (the preview cuts off here)
}
View gist:0ba24030fb12b10b686b
/*
bolt 5000 277963 ns/op
redis 30000 48081 ns/op
pg 10000 149691 ns/op
Yes, the Bolt transactions could be batched. But so too could the PG transactions,
and the Redis work could be pipelined. And that isn't always a workable solution.
*/
import (
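The preview cuts off at the import block, but the three figures above are in Go's standard benchmark shape: iterations run, then nanoseconds per operation. A minimal sketch of how that kind of number is produced follows; the Store interface, mapStore, and every other name below are illustrative stand-ins, not code from the gist:

// A *_test.go file; run with `go test -bench=.`
package store

import "testing"

// Store is a stand-in for the bolt/redis/pg clients being compared.
type Store interface {
	Set(key string, value []byte) error
}

// mapStore is a trivial in-memory implementation, only here so the sketch runs.
type mapStore map[string][]byte

func (m mapStore) Set(key string, value []byte) error {
	m[key] = value
	return nil
}

// benchSet is the shared loop; the test harness prints results in the
// "name  iterations  ns/op" shape quoted in the comment above.
func benchSet(b *testing.B, s Store) {
	value := []byte("hello world")
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		if err := s.Set("key:1", value); err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkMapStore(b *testing.B) {
	benchSet(b, mapStore{})
}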
View init.coffee
# place this in your init.coffee
atom.workspace.observeTextEditors (e) ->
  editor = atom.views.getView(e)
  clone = editor.querySelector('.wrap-guide').cloneNode()
  clone.style.left = (editor.getDefaultCharacterWidth() * 120) + "px"
  editor.querySelector('.underlayer').appendChild(clone)
View gist:a6b441269d82690381e5
/*
Playing with building a specialized set for integers. Specifically:
- Integers are naturally randomly distributed (auto-incrementing ids for millions of records would do)
- The number of items is known upfront (can be approximate)
- The set size changes infrequently (we're mostly interested in membership tests)
Accommodating constraints, to be sure, but if we can meet the first two requirements,
we'll end up with a little less than 1/2 the memory of a map[int]struct{}, and membership
tests taking about 1/2 the time.
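One plausible way to exploit those constraints, sketched for illustration only (this is not the gist's implementation): derive a power-of-two number of buckets from the expected count, keep each small bucket sorted, and answer membership with a binary search inside a single bucket.

package intset

import "sort"

// Set is a sketch of a fixed-bucket integer set. The bucket count is derived
// from the expected size given up front, so roughly uniform integers spread
// evenly and each bucket stays small.
type Set struct {
	mask    int
	buckets [][]int
}

// New sizes the set for roughly `expected` members.
func New(expected int) *Set {
	count := 1
	for count < expected/16 {
		count *= 2 // power of two so we can mask instead of mod
	}
	return &Set{mask: count - 1, buckets: make([][]int, count)}
}

// Add inserts value into its bucket, keeping the bucket sorted.
func (s *Set) Add(value int) {
	idx := value & s.mask
	bucket := s.buckets[idx]
	i := sort.SearchInts(bucket, value)
	if i < len(bucket) && bucket[i] == value {
		return // already present
	}
	bucket = append(bucket, 0)
	copy(bucket[i+1:], bucket[i:])
	bucket[i] = value
	s.buckets[idx] = bucket
}

// Exists is the membership test the comment focuses on: a binary search
// within one small bucket.
func (s *Set) Exists(value int) bool {
	bucket := s.buckets[value&s.mask]
	i := sort.SearchInts(bucket, value)
	return i < len(bucket) && bucket[i] == value
}

With roughly uniform ids each bucket stays small, so the slices carry far less overhead than a hash map's buckets, which is where the memory and lookup savings the comment describes would come from.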
View gist:5a6a45ace2048545b6c3
BYTE UNIX Benchmarks (Version 5.1.3)
System: p2.kapi.io: GNU/Linux
OS: GNU/Linux -- 3.13.0-36-generic -- #63-Ubuntu SMP Wed Sep 3 21:30:07 UTC 2014
Machine: x86_64 (x86_64)
Language: en_US.utf8 (charmap="ANSI_X3.4-1968", collate="ANSI_X3.4-1968")
CPU 0: Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz (6799.8 bogomips)
Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET, Intel virtualization
CPU 1: Intel(R) Core(TM) i7-4770 CPU @ 3.40GHz (6799.8 bogomips)
Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET, Intel virtualization
View gist:a659ef87b3a4a5d590e9
BYTE UNIX Benchmarks (Version 5.1.3)
System: ip-172-30-0-152: GNU/Linux
OS: GNU/Linux -- 3.13.0-36-generic -- #63-Ubuntu SMP Wed Sep 3 21:30:07 UTC 2014
Machine: x86_64 (x86_64)
Language: en_US.utf8 (charmap="ANSI_X3.4-1968", collate="ANSI_X3.4-1968")
CPU 0: Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz (5800.1 bogomips)
Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET
CPU 1: Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz (5800.1 bogomips)
Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET
View gist:0f38b9ad87ebd54375da
BYTE UNIX Benchmarks (Version 5.1.3)
System: ip-172-30-0-254: GNU/Linux
OS: GNU/Linux -- 3.13.0-36-generic -- #63-Ubuntu SMP Wed Sep 3 21:30:07 UTC 2014
Machine: x86_64 (x86_64)
Language: en_US.utf8 (charmap="ANSI_X3.4-1968", collate="ANSI_X3.4-1968")
CPU 0: Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz (5786.7 bogomips)
Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET
CPU 1: Intel(R) Xeon(R) CPU E5-2666 v3 @ 2.90GHz (5786.7 bogomips)
Hyper-Threading, x86-64, MMX, Physical Address Ext, SYSENTER/SYSEXIT, SYSCALL/SYSRET
View bench.go
package main
// @see https://twitter.com/karlseguin/status/524452778093977600
import (
	"math/rand"
	"strconv"
	"testing"
)
const (
View gist:9686555
type PgAutoPrepare struct {
	driver.Conn
	cache map[string]driver.Stmt
}

func (pap *PgAutoPrepare) Open(name string) (driver.Conn, error) {
	conn, err := pq.Open(name)
	if err != nil {
		return nil, err
	}
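The preview stops before the part the struct exists for. Presumably the wrapper intercepts Prepare so that repeated query text reuses a cached driver.Stmt; a hypothetical sketch of what that method could look like (not the gist's actual code):

// Hypothetical sketch, not the gist's code: return a cached statement when the
// same query text has already been prepared on this connection.
func (pap *PgAutoPrepare) Prepare(query string) (driver.Stmt, error) {
	if stmt, ok := pap.cache[query]; ok {
		return stmt, nil
	}
	stmt, err := pap.Conn.Prepare(query)
	if err != nil {
		return nil, err
	}
	pap.cache[query] = stmt
	return stmt, nil
}

A real version would also need to close or drop cached statements when the connection itself is closed.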