UUID Benchmark

Benchmark of UUID insert speed for a btree index vs. a hash index. Hash indexes became WAL-logged and crash-safe in PostgreSQL 10, which is what makes this comparison practical.

AWS instance

  • ami-aa2ea6d0
    • Ubuntu Server 16.04 LTS (HVM), EBS General Purpose (SSD) Volume Type
    • c3.large
    • vCPU: 2
    • RAM GiB: 3.75
    • Disk GB: 2 x 16 (SSD)

Install postgres

sudo sh -c 'echo "deb http://apt.postgresql.org/pub/repos/apt/ $(lsb_release -cs)-pgdg main" > /etc/apt/sources.list.d/pgdg.list'

sudo apt-get install wget ca-certificates
wget --quiet -O - https://www.postgresql.org/media/keys/ACCC4CF8.asc | sudo apt-key add -
sudo apt-get update
sudo apt-get install postgresql-10
  • Version
    • PostgreSQL 10.1 on x86_64-pc-linux-gnu, compiled by gcc (Ubuntu 5.4.0-6ubuntu1~16.04.4) 5.4.0 20160609, 64-bit
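
The version string above can be read back with a quick psql one-liner (a minimal check, assuming the default peer-authenticated postgres superuser):

sudo -u postgres psql -c 'select version();'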

Prepare schema

create extension pgcrypto;

create table with_btree ( u uuid default gen_random_uuid() );
create table with_hash  ( u uuid default gen_random_uuid() );
create index on with_btree using btree (u);
create index on with_hash  using hash  (u);

create table no_idx ( u uuid default gen_random_uuid() );
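
The two indexes above get auto-generated names; they can be listed through the standard pg_indexes view (a quick verification, not part of the original run):

select indexname, indexdef
  from pg_indexes
 where tablename in ('with_btree', 'with_hash');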

/*
 max_wal_size
--------------
 1GB
(1 row)

 checkpoint_timeout
--------------------
 5min
*/
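
The settings quoted in the comment above are the PostgreSQL 10 defaults; they can be read back with SHOW:

show max_wal_size;
show checkpoint_timeout;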

Run benchmark

sudo su - postgres

echo "insert into no_idx default values;" > insert-no-idx.sql
pgbench postgres://postgres@/postgres -j 64 -c 100 -f insert-no-idx.sql -T 30
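
(Flags: -c 100 opens 100 concurrent client connections, -j 64 spreads them across 64 pgbench worker threads, -f substitutes the custom single-insert script for the built-in TPC-B-like one, and -T 30 runs for 30 seconds. The same flags are reused for every run below.)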

# number of transactions actually processed: 454844
# latency average = 6.613 ms
# tps = 15122.089083 (including connections establishing)
# tps = 15226.324782 (excluding connections establishing)
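
Since each transaction performs exactly one insert, the processed-transaction count can be sanity-checked against the table afterwards:

select count(*) from no_idx;  -- expect ~454844 after the run above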

echo "insert into with_btree default values;" > insert-btree.sql
echo "insert into with_hash default values;" > insert-hash.sql

pgbench postgres://postgres@/postgres -j 64 -c 100 -f insert-btree.sql -T 30

# number of transactions actually processed: 399464
# latency average = 7.533 ms
# tps = 13275.157771 (including connections establishing)
# tps = 13413.477559 (excluding connections establishing)

pgbench postgres://postgres@/postgres -j 64 -c 100 -f insert-hash.sql -T 30

# number of transactions actually processed: 401432
# latency average = 7.494 ms
# tps = 13343.430249 (including connections establishing)
# tps = 13473.951515 (excluding connections establishing)

Try btree again (rerunning the same pgbench command)

# number of transactions actually processed: 394232
# latency average = 7.635 ms
# tps = 13096.886671 (including connections establishing)
# tps = 13149.673835 (excluding connections establishing)

Try hash again (rerunning the same pgbench command)

# number of transactions actually processed: 402507
# latency average = 7.478 ms
# tps = 13373.208235 (including connections establishing)
# tps = 13466.044780 (excluding connections establishing)

Try with bigger tables

-- start fresh
truncate with_btree;
truncate with_hash;
vacuum with_btree;
vacuum with_hash;

-- load up the tables
insert into with_btree (select gen_random_uuid() from generate_series(1, 1024*1024*1024/128));
insert into with_hash (select gen_random_uuid() from generate_series(1, 1024*1024*1024/128));
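
Each insert adds 1024*1024*1024/128 = 8,388,608 rows; the expression presumably targets roughly 1 GiB of data at an assumed ~128 bytes of storage per row. The load can be verified with a plain count:

select count(*) from with_btree;  -- 8388608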

/*
 At this point:

    table    | total size | index size
 ------------+------------+------------
  with_btree | 681 MB     | 326 MB
  with_hash  | 647 MB     | 293 MB
*/
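
A sketch of how to gather those sizes (assuming the standard pg_total_relation_size and pg_indexes_size functions were the source):

select relname as tbl,
       pg_size_pretty(pg_total_relation_size(oid)) as total_size,
       pg_size_pretty(pg_indexes_size(oid)) as index_size
  from pg_class
 where relname in ('with_btree', 'with_hash');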

New benchmark numbers

pgbench postgres://postgres@/postgres -j 64 -c 100 -f insert-btree.sql -T 30

# number of transactions actually processed: 304535
# latency average = 9.896 ms
# tps = 10105.187084 (including connections establishing)
# tps = 10148.426023 (excluding connections establishing)

pgbench postgres://postgres@/postgres -j 64 -c 100 -f insert-hash.sql -T 30

# number of transactions actually processed: 333183
# latency average = 9.050 ms
# tps = 11050.178431 (including connections establishing)
# tps = 11100.631423 (excluding connections establishing)
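
With ~8.4 million preexisting rows, the hash index sustains roughly 9% higher insert throughput than the btree (11,050 vs. 10,105 tps including connection establishment), whereas on empty tables the two indexes performed within noise of each other.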