-- Gist by @steve-chavez, created March 11, 2019.
-- bench tests for PostgREST new m2m and child embed queries.
-- http GET "localhost:3000/items?select=id,name,c1,c2,c3,c4,subitems{id,name}&id=lte.100"
--
-- pgbench script: the "new" PostgREST child-embed query (issue #1075).
-- Children are embedded with a LEFT JOIN LATERAL plus json_agg over a
-- GROUP BY on the parent columns, instead of one correlated subquery per
-- parent row (compare the "old" variant benchmarked further down).
-- :rid is a pgbench variable: each transaction reads a random 100-id window.
\set rid random(1, 999900)
BEGIN ISOLATION LEVEL READ COMMITTED READ ONLY;
WITH pg_source AS (
SELECT
"new_m2m_child_queries"."items"."id",
"new_m2m_child_queries"."items"."name",
"new_m2m_child_queries"."items"."c1",
"new_m2m_child_queries"."items"."c2",
"new_m2m_child_queries"."items"."c3",
"new_m2m_child_queries"."items"."c4",
-- An item with no subitems still gets one all-NULL lateral row, so
-- json_agg yields '[null]'; nullif + coalesce map that to an empty array.
coalesce(nullif(json_agg("subitems_subitems")::text, '[null]'), '[]')::json AS "subitems"
FROM "new_m2m_child_queries"."items"
LEFT JOIN LATERAL (
SELECT
"new_m2m_child_queries"."subitems"."id",
"new_m2m_child_queries"."subitems"."name"
FROM "new_m2m_child_queries"."subitems"
WHERE
"new_m2m_child_queries"."subitems"."item_id" = "new_m2m_child_queries"."items"."id") AS "subitems_subitems" ON TRUE
WHERE
"new_m2m_child_queries"."items"."id" > :rid AND
"new_m2m_child_queries"."items"."id" <= (:rid + 100)
GROUP BY
"new_m2m_child_queries"."items"."id",
"new_m2m_child_queries"."items"."name",
"new_m2m_child_queries"."items"."c1",
"new_m2m_child_queries"."items"."c2",
"new_m2m_child_queries"."items"."c3",
"new_m2m_child_queries"."items"."c4")
-- PostgREST response envelope: fold all result rows into one JSON array.
SELECT coalesce(json_agg(_postgrest_t), '[]')::character varying AS BODY
FROM (SELECT * FROM pg_source) _postgrest_t;
COMMIT;
-- ┌───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-- │ QUERY PLAN │
-- ├───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
-- │ Aggregate (cost=22104.98..22104.99 rows=1 width=32) (actual time=228.436..228.436 rows=1 loops=1) │
-- │ CTE pg_source │
-- │ -> GroupAggregate (cost=22099.59..22102.77 rows=98 width=47) (actual time=219.880..227.585 rows=100 loops=1) │
-- │ Group Key: items.id │
-- │ -> Sort (cost=22099.59..22099.83 rows=98 width=47) (actual time=219.784..220.232 rows=9999 loops=1) │
-- │ Sort Key: items.id │
-- │ Sort Method: quicksort Memory: 1734kB │
-- │ -> Hash Right Join (cost=11.37..22096.35 rows=98 width=47) (actual time=0.048..218.544 rows=9999 loops=1) │
-- │ Hash Cond: (subitems.item_id = items.id) │
-- │ -> Seq Scan on subitems (cost=0.00..18334.00 rows=1000000 width=36) (actual time=0.015..173.752 rows=1000000 loops=1) │
-- │ -> Hash (cost=10.14..10.14 rows=98 width=15) (actual time=0.029..0.029 rows=100 loops=1) │
-- │ Buckets: 1024 Batches: 1 Memory Usage: 13kB │
-- │ -> Index Scan using items_pkey on items (cost=0.42..10.14 rows=98 width=15) (actual time=0.004..0.020 rows=100 loops=1) │
-- │ Index Cond: (id <= 100) │
-- │ -> CTE Scan on pg_source (cost=0.00..1.96 rows=98 width=68) (actual time=219.882..227.751 rows=100 loops=1) │
-- │ Planning time: 0.154 ms │
-- │ Execution time: 228.600 ms │
-- └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-- pgbench -T 300 -f support/issues/1075/new-child-query.sql example [INS]
-- starting vacuum...end.
-- transaction type: support/issues/1075/new-child-query.sql
-- scaling factor: 1
-- query mode: simple
-- number of clients: 1
-- number of threads: 1
-- duration: 300 s
-- number of transactions actually processed: 2011
-- latency average = 149.182 ms
-- tps = 6.703237 (including connections establishing)
-- tps = 6.703264 (excluding connections establishing)
-- http GET "localhost:3000/subitems?select=id,name,c1,c2,c3,c4,pieces(id,name)&id=gt.100&id=lte.100"
--
-- pgbench script: the "new" PostgREST many-to-many embed query (issue #1075),
-- going subitems -> subitems_pieces -> pieces via a LEFT JOIN LATERAL.
-- NOTE(review): the example URL filter above (gt.100 AND lte.100) matches no
-- rows if taken literally — presumably an illustrative placeholder for the
-- random :rid window actually used below; confirm against the gist intent.
-- NOTE(review): unlike the child query above, this json_agg has no
-- nullif('[null]') guard, so a subitem with no pieces would serialize as
-- [null] rather than [] — visible difference between the two scripts.
\set rid random(1, 999900)
BEGIN ISOLATION LEVEL READ COMMITTED READ ONLY;
WITH pg_source AS (
SELECT
"new_m2m_child_queries"."subitems"."id",
"new_m2m_child_queries"."subitems"."name",
"new_m2m_child_queries"."subitems"."c1",
"new_m2m_child_queries"."subitems"."c2",
"new_m2m_child_queries"."subitems"."c3",
"new_m2m_child_queries"."subitems"."c4",
json_agg("pieces_pieces") AS "pieces"
FROM "new_m2m_child_queries"."subitems"
-- The lateral resolves the junction table; the comma join inside it is the
-- PostgREST-generated form and is kept verbatim for the benchmark.
LEFT JOIN LATERAL (
SELECT
"new_m2m_child_queries"."pieces"."id",
"new_m2m_child_queries"."pieces"."name"
FROM
"new_m2m_child_queries"."subitems_pieces",
"new_m2m_child_queries"."pieces"
WHERE
"new_m2m_child_queries"."subitems"."id" = "new_m2m_child_queries"."subitems_pieces"."subitem_id" AND
"new_m2m_child_queries"."pieces"."id" = "new_m2m_child_queries"."subitems_pieces"."piece_id"
) AS "pieces_pieces" ON TRUE
WHERE
"new_m2m_child_queries"."subitems"."id" > :rid AND
"new_m2m_child_queries"."subitems"."id" <= (:rid + 100)
GROUP BY
"new_m2m_child_queries"."subitems"."id",
"new_m2m_child_queries"."subitems"."name",
"new_m2m_child_queries"."subitems"."c1",
"new_m2m_child_queries"."subitems"."c2",
"new_m2m_child_queries"."subitems"."c3",
"new_m2m_child_queries"."subitems"."c4"
)
-- PostgREST response envelope: fold all result rows into one JSON array.
SELECT coalesce(json_agg(_postgrest_t), '[]')::character varying AS BODY
FROM (SELECT * FROM pg_source) _postgrest_t;
COMMIT;
-- ┌─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-- │ QUERY PLAN │
-- ├─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
-- │ Aggregate (cost=45770.41..45770.43 rows=1 width=32) (actual time=240.952..240.952 rows=1 loops=1) │
-- │ CTE pg_source │
-- │ -> GroupAggregate (cost=45765.99..45768.07 rows=104 width=66) (actual time=240.487..240.726 rows=100 loops=1) │
-- │ Group Key: subitems.id │
-- │ -> Sort (cost=45765.99..45766.25 rows=104 width=66) (actual time=240.472..240.488 rows=300 loops=1) │
-- │ Sort Key: subitems.id │
-- │ Sort Method: quicksort Memory: 67kB │
-- │ -> Hash Right Join (cost=35735.09..45762.50 rows=104 width=66) (actual time=128.773..240.425 rows=300 loops=1) │
-- │ Hash Cond: (subitems_pieces.subitem_id = subitems.id) │
-- │ -> Merge Join (cost=35723.55..44625.65 rows=300000 width=36) (actual time=128.730..226.955 rows=300000 loops=1) │
-- │ Merge Cond: (pieces.id = subitems_pieces.piece_id) │
-- │ -> Index Scan using pieces_pkey on pieces (cost=0.42..34219.43 rows=1000000 width=36) (actual time=0.005..22.375 rows=100003 loops=1) │
-- │ -> Materialize (cost=35721.90..37221.90 rows=300000 width=8) (actual time=128.723..164.870 rows=300000 loops=1) │
-- │ -> Sort (cost=35721.90..36471.90 rows=300000 width=8) (actual time=128.721..148.239 rows=300000 loops=1) │
-- │ Sort Key: subitems_pieces.piece_id │
-- │ Sort Method: external sort Disk: 5280kB │
-- │ -> Seq Scan on subitems_pieces (cost=0.00..4328.00 rows=300000 width=8) (actual time=0.009..23.914 rows=300000 loops=1) │
-- │ -> Hash (cost=10.25..10.25 rows=104 width=34) (actual time=0.034..0.034 rows=100 loops=1) │
-- │ Buckets: 1024 Batches: 1 Memory Usage: 15kB │
-- │ -> Index Scan using subitems_pkey on subitems (cost=0.42..10.25 rows=104 width=34) (actual time=0.005..0.021 rows=100 loops=1) │
-- │ Index Cond: (id <= 100) │
-- │ -> CTE Scan on pg_source (cost=0.00..2.08 rows=104 width=84) (actual time=240.489..240.751 rows=100 loops=1) │
-- │ Planning time: 0.224 ms │
-- │ Execution time: 241.937 ms │
-- └─────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-- pgbench -T 300 -f support/issues/1075/new-m2m-query.sql example [INS]
-- starting vacuum...end.
-- transaction type: support/issues/1075/new-m2m-query.sql
-- scaling factor: 1
-- query mode: simple
-- number of clients: 1
-- number of threads: 1
-- duration: 300 s
-- number of transactions actually processed: 1660
-- latency average = 180.831 ms
-- tps = 5.530016 (including connections establishing)
-- tps = 5.530037 (excluding connections establishing)
-- http GET "localhost:3000/items?select=id,name,c1,c2,c3,c4,subitems{id,name}&id=gt.100&id=lte.100"
--
-- pgbench script: the "old" PostgREST child-embed query, kept verbatim as
-- the baseline. Children are fetched with a correlated scalar subquery in
-- the SELECT list — the plan below shows it re-executed once per parent row
-- (SubPlan, loops=100), each run a seq scan over the 1M-row subitems table.
-- NOTE(review): as with the other scripts, the literal URL filter
-- (gt.100 AND lte.100) is presumably a placeholder for the :rid window.
\set rid random(1, 999900)
BEGIN ISOLATION LEVEL READ COMMITTED READ ONLY;
WITH pg_source AS
(SELECT "new_m2m_child_queries"."items"."id",
"new_m2m_child_queries"."items"."name",
"new_m2m_child_queries"."items"."c1",
"new_m2m_child_queries"."items"."c2",
"new_m2m_child_queries"."items"."c3",
"new_m2m_child_queries"."items"."c4",
-- COALESCE maps the NULL json_agg of an empty subquery to '[]'.
COALESCE(
(SELECT json_agg("subitems".*)
FROM
(SELECT "new_m2m_child_queries"."subitems"."id", "new_m2m_child_queries"."subitems"."name"
FROM "new_m2m_child_queries"."subitems"
WHERE "new_m2m_child_queries"."subitems"."item_id" = "new_m2m_child_queries"."items"."id" ) "subitems"), '[]') AS "subitems"
FROM "new_m2m_child_queries"."items"
WHERE
"new_m2m_child_queries"."items"."id" > :rid AND
"new_m2m_child_queries"."items"."id" <= (:rid + 100) )
-- PostgREST response envelope: fold all result rows into one JSON array.
SELECT coalesce(json_agg(_postgrest_t), '[]')::character varying AS BODY
FROM (SELECT * FROM pg_source) _postgrest_t;
COMMIT;
-- ┌────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-- │ QUERY PLAN │
-- ├────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
-- │ Aggregate (cost=2041770.07..2041770.09 rows=1 width=32) (actual time=6954.117..6954.117 rows=1 loops=1) │
-- │ CTE pg_source │
-- │ -> Index Scan using items_pkey on items (cost=0.42..2041767.86 rows=98 width=63) (actual time=70.461..6952.133 rows=100 loops=1) │
-- │ Index Cond: (id <= 100) │
-- │ SubPlan 1 │
-- │ -> Aggregate (cost=20834.25..20834.26 rows=1 width=32) (actual time=69.515..69.516 rows=1 loops=100) │
-- │ -> Seq Scan on subitems (cost=0.00..20834.00 rows=100 width=18) (actual time=0.353..69.440 rows=100 loops=100) │
-- │ Filter: (item_id = items.id) │
-- │ Rows Removed by Filter: 999900 │
-- │ -> CTE Scan on pg_source (cost=0.00..1.96 rows=98 width=84) (actual time=70.463..6952.376 rows=100 loops=1) │
-- │ Planning time: 0.115 ms │
-- │ Execution time: 6954.229 ms │
-- └────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-- pgbench -T 300 -f support/issues/1075/old-child-query.sql example [INS]
-- starting vacuum...end.
-- transaction type: support/issues/1075/old-child-query.sql
-- scaling factor: 1
-- query mode: simple
-- number of clients: 1
-- number of threads: 1
-- duration: 300 s
-- number of transactions actually processed: 51
-- latency average = 5899.788 ms
-- tps = 0.169498 (including connections establishing)
-- tps = 0.169499 (excluding connections establishing)
-- http GET "localhost:3000/subitems?select=id,name,c1,c2,c3,c4,pieces(id,name)&id=gt.100&id=lte.100"
--
-- pgbench script: the "old" PostgREST many-to-many embed query, kept
-- verbatim as the baseline. Pieces are fetched through the junction table
-- with a correlated scalar subquery per subitem row — the plan below shows
-- the SubPlan executed loops=100, each doing a seq scan on subitems_pieces.
\set rid random(1, 999900)
BEGIN ISOLATION LEVEL READ COMMITTED READ ONLY;
WITH pg_source AS (
SELECT
"new_m2m_child_queries"."subitems"."id",
"new_m2m_child_queries"."subitems"."name",
"new_m2m_child_queries"."subitems"."c1",
"new_m2m_child_queries"."subitems"."c2",
"new_m2m_child_queries"."subitems"."c3",
"new_m2m_child_queries"."subitems"."c4",
-- COALESCE maps the NULL json_agg of an empty subquery to '[]'.
COALESCE ((
SELECT
json_agg("pieces".*)
FROM (
SELECT
"new_m2m_child_queries"."pieces"."id",
"new_m2m_child_queries"."pieces"."name"
FROM
"new_m2m_child_queries"."pieces",
"new_m2m_child_queries"."subitems_pieces"
WHERE
"new_m2m_child_queries"."pieces"."id" = "new_m2m_child_queries"."subitems_pieces"."piece_id" AND
"new_m2m_child_queries"."subitems"."id" = "new_m2m_child_queries"."subitems_pieces"."subitem_id" ) "pieces"), '[]') AS "pieces"
FROM "new_m2m_child_queries"."subitems"
WHERE
"new_m2m_child_queries"."subitems"."id" > :rid AND
"new_m2m_child_queries"."subitems"."id" <= (:rid + 100))
-- PostgREST response envelope: fold all result rows into one JSON array.
SELECT coalesce(json_agg(_postgrest_t), '[]')::character varying AS body
FROM ( SELECT * FROM pg_source) _postgrest_t;
COMMIT;
-- ┌───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┐
-- │ QUERY PLAN │
-- ├───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┤
-- │ Aggregate (cost=530763.85..530763.86 rows=1 width=32) (actual time=1733.119..1733.119 rows=1 loops=1) │
-- │ CTE pg_source │
-- │ -> Index Scan using subitems_pkey on subitems (cost=0.42..530761.51 rows=104 width=66) (actual time=18.267..1732.301 rows=100 loops=1) │
-- │ Index Cond: (id <= 100) │
-- │ SubPlan 1 │
-- │ -> Aggregate (cost=5103.37..5103.38 rows=1 width=32) (actual time=17.319..17.320 rows=1 loops=100) │
-- │ -> Nested Loop (cost=0.42..5103.36 rows=3 width=16) (actual time=0.017..17.313 rows=3 loops=100) │
-- │ -> Seq Scan on subitems_pieces (cost=0.00..5078.00 rows=3 width=4) (actual time=0.012..17.303 rows=3 loops=100) │
-- │ Filter: (subitems.id = subitem_id) │
-- │ Rows Removed by Filter: 299997 │
-- │ -> Index Scan using pieces_pkey on pieces (cost=0.42..8.44 rows=1 width=16) (actual time=0.002..0.002 rows=1 loops=300) │
-- │ Index Cond: (id = subitems_pieces.piece_id) │
-- │ -> CTE Scan on pg_source (cost=0.00..2.08 rows=104 width=84) (actual time=18.270..1732.437 rows=100 loops=1) │
-- │ Planning time: 0.172 ms │
-- │ Execution time: 1733.161 ms │
-- └───────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────────┘
-- pgbench -T 300 -f support/issues/1075/old-m2m-query.sql example [INS]
-- starting vacuum...end.
-- transaction type: support/issues/1075/old-m2m-query.sql
-- scaling factor: 1
-- query mode: simple
-- number of clients: 1
-- number of threads: 1
-- duration: 300 s
-- number of transactions actually processed: 204
-- latency average = 1471.506 ms
-- tps = 0.679576 (including connections establishing)
-- tps = 0.679580 (excluding connections establishing)
-- create database example;
-- \c example;

-- Benchmark schema: items 1--* subitems *--* pieces (via subitems_pieces).
-- Rebuilt from scratch on every run.
drop schema if exists new_m2m_child_queries cascade;
create schema new_m2m_child_queries;
set search_path = new_m2m_child_queries;

-- Parent table for the child-embed benchmark; c1..c4 are filler columns
-- that widen the rows and the select lists.
create table items (
  id   int  not null primary key,
  name text not null,
  c1   int  not null,
  c2   int  not null,
  c3   int  not null,
  c4   int  not null
);

-- Child of items. NOTE(review): item_id carries only the FK — Postgres does
-- not auto-index FK columns, which is why the recorded plans seq-scan here.
create table subitems (
  id      int  not null primary key,
  name    text not null,
  item_id int  not null references items(id),
  c1      int  not null,
  c2      int  not null,
  c3      int  not null,
  c4      int  not null
);

-- Far side of the many-to-many relationship.
create table pieces (
  id   int  not null primary key,
  name text not null,
  c1   int  not null,
  c2   int  not null,
  c3   int  not null,
  c4   int  not null
);

-- Junction table; intentionally has no primary key or indexes, matching the
-- conditions under which the plans above were captured.
create table subitems_pieces (
  subitem_id int not null references subitems(id),
  piece_id   int not null references pieces(id)
);
-- Seed data: 1M items, 1M subitems (~100 per item via integer i/100 + 1),
-- 1M pieces, and 3 junction rows for each of the first 100k subitems
-- (piece ids i, i+1, i+2) — same rows as the original per-row loops.
-- Improvement: set-based INSERT ... SELECT over generate_series replaces
-- 3.1M single-row INSERTs executed in plpgsql FOR loops; one statement per
-- table avoids the per-row executor/trigger overhead and runs far faster.
-- Explicit column lists guard against future column reordering.
create or replace function populate() returns void as $$
begin
  insert into items (id, name, c1, c2, c3, c4)
  select i, 'item ' || i, i + 1, i + 2, i + 3, i + 4
  from generate_series(1, 1000000) as i;

  insert into subitems (id, name, item_id, c1, c2, c3, c4)
  select i, 'subitem ' || i, (i / 100) + 1, i + 1, i + 2, i + 3, i + 4
  from generate_series(1, 1000000) as i;

  insert into pieces (id, name, c1, c2, c3, c4)
  select i, 'piece ' || i, i + 1, i + 2, i + 3, i + 4
  from generate_series(1, 1000000) as i;

  -- cross join generates (i, i), (i, i+1), (i, i+2) for i in 1..100000
  insert into subitems_pieces (subitem_id, piece_id)
  select i, i + j
  from generate_series(1, 100000) as i,
       generate_series(0, 2) as j;
end;
$$ language plpgsql;
select populate();
-- End of gist.