<?php
// Micro-benchmark setup: run $iterations rounds timed from $start; $assignment
// presumably toggles between reusing $fooBar and constructing new FooBar instances.
class FooBar { }
$fooBar = new FooBar();
$iterations = 1e6;
$assignment = false;
$start = microtime(true);
{
  "items": {
    "19192": {
      "id": 19192,
      "title": "Hitman Absolution Full Disclosure Trailer",
      "ns": 6,
      "url": "/File:Hitman_Absolution_Full_Disclosure_Trailer",
      "revision": {
        "id": 37872,
        "user": "TheBlueRogue",
{
  "sections": [
    {
      "title": "Hitman Absolution Full Disclosure Trailer",
      "level": 1,
      "content": [],
      "images": []
    },
    {
      "title": "Description",
@drsnyder
drsnyder / urandom-reads.py
Last active August 11, 2021 10:44
Demonstrate the contention on /dev/urandom with N threads.
# Create a user land file for testing.
# dd if=/dev/urandom of=/tmp/urandom bs=1M count=10
#
# urandom-reads.py infile threads
# Examples:
# time python2.6 urandom-reads.py /tmp/urandom
# time python2.6 urandom-reads.py /dev/urandom
#
# R snippet to generate a plot of the read-time distribution at each level of concurrency:
# rdt = read.csv("output.csv", header=F)
-- Another solution using a rollup table.
CREATE TABLE object_container_order_rollup (
container_id INTEGER REFERENCES containers(id),
category_id INTEGER REFERENCES categories(id),
object_id INTEGER REFERENCES objects(id),
type VARCHAR(32) NOT NULL,
index INTEGER NOT NULL,
PRIMARY KEY (container_id, type, category_id, object_id)
);
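The preview stops at the DDL, so as a rough usage sketch (the rollup columns come from the table definition above, but the literal values, the join to objects, and the LIMIT are assumptions), reading one container's cached ordering might look like:
-- Hedged usage sketch, not part of the gist: page through one container's
-- objects in the cached order.
SELECT o.*
FROM object_container_order_rollup r
JOIN objects o ON o.id = r.object_id
WHERE r.container_id = 1
  AND r.type = 'score'          -- example type value; the real values are not shown
ORDER BY r."index"
LIMIT 25;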
@drsnyder
drsnyder / array-unest-solution.sql
Last active August 29, 2015 13:56
A possible solution to the object ordering problem described here https://gist.github.com/drsnyder/9277054.
-- One solution to the problem. Cache the ordering in an array.
-- Pros:
-- It's fast enough (~90ms) even with filtering
-- Updates are atomic. You are only updating one row in one table.
-- If the ordering is predictable (e.g. create time) then the
-- maintenance is fast and easy because you just append to the array.
-- Cons:
-- It requires maintenance. It's not a view and it's not a relation
-- in the formal sense (is that right?).
CREATE TYPE object_container_order_type AS (
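The CREATE TYPE line is where the preview cuts off; below is a hedged sketch of the array-caching idea described above, not the gist's actual code. The cache table, the 'score' literal, and unnest(...) WITH ORDINALITY (PostgreSQL 9.4+) are assumptions; the gist itself appears to build on a composite type instead.
-- Hedged sketch of the array approach: store the ordered object ids per
-- container/type, then unnest them to join back to objects in order.
CREATE TABLE object_container_order_cache (
    container_id INTEGER REFERENCES containers(id),
    type         VARCHAR(32) NOT NULL,
    object_ids   INTEGER[] NOT NULL,
    PRIMARY KEY (container_id, type)
);

WITH ordered AS (
    SELECT u.object_id, u.ord
    FROM object_container_order_cache c,
         unnest(c.object_ids) WITH ORDINALITY AS u(object_id, ord)
    WHERE c.container_id = 1 AND c.type = 'score'
)
SELECT o.*
FROM ordered
JOIN objects o ON o.id = ordered.object_id
ORDER BY ordered.ord;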
@drsnyder
drsnyder / object-ordering-setup.sql
Last active August 29, 2015 13:56
This gist contains setup SQL and example data for reproducing an object ordering problem that I'm trying to optimize.
-- For normal_rand() so we can create distributions that are not uniform.
-- The "real" distributions are not normal but they are closer to normal than uniform.
CREATE EXTENSION tablefunc;
BEGIN;
-- ###########################
-- The base "object" that is displayed in different orderings, using either
-- id DESC (as a proxy for create time) or scores.
CREATE TABLE objects (
id INTEGER PRIMARY KEY,
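The objects DDL is truncated by the preview; the snippet below only illustrates the normal_rand() call the comment refers to (the score column and the chosen mean/stddev are assumptions, not the gist's values).
-- tablefunc's normal_rand(n, mean, stddev) returns n normally distributed values:
SELECT * FROM normal_rand(10, 500.0, 100.0);

-- Hedged example of seeding non-uniform test data with it (score column assumed):
INSERT INTO objects (id, score)
SELECT row_number() OVER (), round(r)::integer
FROM normal_rand(100000, 500.0, 100.0) AS r;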
@drsnyder
drsnyder / perf-stats.sh
Created January 27, 2014 19:21
Simple perf stats
# 20 HEAD requests; prints count - min, median, 95th percentile, and max of the "real" times (seconds).
for i in {1..20}; do ( time -p curl -I -s http://www.site.com ) 2>&1 | grep real | cut -c6-; done | sort -n | awk '{ d[NR]=$1; } END { print NR, "-", d[1], d[int(NR*0.5)], d[int(NR*0.95)], d[NR]; }'
@drsnyder
drsnyder / counters-explain-withpk.txt
Created January 3, 2014 20:24
EXPLAIN ANALYZE on counters WITH primary key on (count_type, count_id)
test=# explain (analyze, buffers, verbose) update counters set count = count + 1 where count_type = 0 and count_id = 1;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------
 Update on public.counters  (cost=0.00..5.03 rows=1 width=18) (actual time=0.036..0.036 rows=0 loops=1)
   Buffers: shared hit=6
   ->  Seq Scan on public.counters  (cost=0.00..5.03 rows=1 width=18) (actual time=0.020..0.021 rows=1 loops=1)
         Output: count_type, count_id, (count + 1), ctid
         Filter: ((counters.count_type = 0) AND (counters.count_id = 1))
         Rows Removed by Filter: 4
         Buffers: shared hit=5
@drsnyder
drsnyder / counters-explain-npk.txt
Created January 3, 2014 20:09
EXPLAIN ANALYZE on counters with no primary or unique key.
test=# explain (analyze, buffers, verbose) update counters set count = count + 1 where count_type = 0 and count_id = 1;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------
 Update on public.counters  (cost=0.00..5.74 rows=15 width=18) (actual time=0.110..0.110 rows=0 loops=1)
   Buffers: shared hit=20
   ->  Seq Scan on public.counters  (cost=0.00..5.74 rows=15 width=18) (actual time=0.014..0.054 rows=15 loops=1)
         Output: count_type, count_id, (count + 1), ctid
         Filter: ((counters.count_type = 0) AND (counters.count_id = 1))
         Rows Removed by Filter: 32
         Buffers: shared hit=5
 Total runtime: 0.137 ms
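For context, a hedged sketch of how the two runs above might be reproduced; only the table/column names and the UPDATE come from the plans, while the column types and seed data are guesses (the no-key run evidently contained duplicate (count_type, count_id) rows, which the primary key would prevent).
-- Hedged reproduction sketch (types and seed data assumed):
CREATE TABLE counters (
    count_type INTEGER NOT NULL,
    count_id   INTEGER NOT NULL,
    count      INTEGER NOT NULL DEFAULT 0
);

-- "withpk" variant: add the composite primary key before testing.
-- ALTER TABLE counters ADD PRIMARY KEY (count_type, count_id);

EXPLAIN (ANALYZE, BUFFERS, VERBOSE)
UPDATE counters SET count = count + 1 WHERE count_type = 0 AND count_id = 1;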