Skip to content

Instantly share code, notes, and snippets.

@funny-falcon
Last active December 30, 2021 10:57
Show Gist options
  • Save funny-falcon/45fa9460507c015390db99a7efd18a75 to your computer and use it in GitHub Desktop.
Bug report
-- Test correctness of parallel query execution after removal
-- of Append path due to single non-trivial child.
-- It is (was) broken since commit 8edd0e79460b414b1d971895312e549e95e12e4f
-- "Suppress Append and MergeAppend plan nodes that have a single child."
-- ie PostgreSQL v12.
-- Plans below for broken version.
DROP TABLE IF EXISTS gather_append_1, gather_append_2;

-- Outer side of the join: 2000 rows; f is true for every 50th row (40 rows).
CREATE TABLE gather_append_1 (
    fk int,
    f  bool
);
INSERT INTO gather_append_1 (fk, f)
SELECT i, i % 50 = 0
FROM generate_series(1, 2000) AS i;
CREATE INDEX gather_append_1_ix ON gather_append_1 (f);

-- Inner side of the join: 5 matching rows per fk (10000 rows total).
CREATE TABLE gather_append_2 (
    fk  int,
    val serial
);
INSERT INTO gather_append_2 (fk)
SELECT fk
FROM gather_append_1 CROSS JOIN generate_series(1, 5) AS i;

-- Fresh statistics so the planner sees realistic row estimates.
ANALYZE gather_append_1, gather_append_2;
-- Force a serial plan to establish the correct reference row count.
SET max_parallel_workers_per_gather = 0;
-- Find correct rows count: 40 rows with f = true, each matched 5 times => 200.
SELECT count(1)
FROM (
    SELECT fk FROM gather_append_1 WHERE f
    UNION ALL
    SELECT fk FROM gather_append_1 WHERE false
) AS t
LEFT OUTER JOIN gather_append_2
USING (fk);
-- count
---------
--   200
--(1 row)
-- Make a parallel plan attractive for this small data set.
SET parallel_setup_cost = 0;
SET parallel_tuple_cost = 0.1;
SET min_parallel_table_scan_size = 0;
SET max_parallel_workers_per_gather = 2;
-- Same query as above; on broken versions the parallel plan double-counts
-- (every participant returns the full result), yielding 400 instead of 200.
SELECT count(1)
FROM (
    SELECT fk FROM gather_append_1 WHERE f
    UNION ALL
    SELECT fk FROM gather_append_1 WHERE false
) AS t
LEFT OUTER JOIN gather_append_2
USING (fk);
-- count
---------
--   400
--(1 row)
-- Show the broken plan for the count query: note the non-partial Index Scan
-- placed under Gather (rows=40 loops=2 — leader and worker each scan the
-- whole index), so the join result is produced twice.
EXPLAIN (ANALYZE true, COSTS false, TIMING false, SUMMARY false, VERBOSE false, BUFFERS false)
SELECT count(1)
FROM (
SELECT fk FROM gather_append_1 WHERE f
UNION ALL
SELECT fk FROM gather_append_1 WHERE false
) as t
LEFT OUTER JOIN gather_append_2
USING (fk);
-- Finalize Aggregate (actual rows=1 loops=1)
-- -> Gather (actual rows=2 loops=1)
-- Workers Planned: 1
-- Workers Launched: 1
-- -> Partial Aggregate (actual rows=1 loops=2)
-- -> Parallel Hash Left Join (actual rows=200 loops=2)
-- Hash Cond: (gather_append_1.fk = gather_append_2.fk)
-- -> Index Scan using gather_append_1_ix on gather_append_1 (actual rows=40 loops=2)
-- Index Cond: (f = true)
-- -> Parallel Hash (actual rows=5000 loops=2)
-- Buckets: 16384 Batches: 1 Memory Usage: 544kB
-- -> Parallel Seq Scan on gather_append_2 (actual rows=5000 loops=2)
-- Result rows in root node should be equal to non-parallel count
-- Same defect through a Gather Merge: each participant sorts and returns the
-- full 200-row result, so the root emits 400 rows instead of 200.
EXPLAIN (ANALYZE true, COSTS false, TIMING false, SUMMARY false, VERBOSE false, BUFFERS false)
SELECT val
FROM (
SELECT fk FROM gather_append_1 WHERE f
UNION ALL
SELECT fk FROM gather_append_1 WHERE false
) as t
LEFT OUTER JOIN gather_append_2
USING (fk)
ORDER BY val;
-- Gather Merge (actual rows=400 loops=1)
-- Workers Planned: 1
-- Workers Launched: 1
-- -> Sort (actual rows=200 loops=2)
-- Sort Key: gather_append_2.val
-- Sort Method: quicksort Memory: 25kB
-- Worker 0: Sort Method: quicksort Memory: 25kB
-- -> Parallel Hash Left Join (actual rows=200 loops=2)
-- Hash Cond: (gather_append_1.fk = gather_append_2.fk)
-- -> Index Scan using gather_append_1_ix on gather_append_1 (actual rows=40 loops=2)
-- Index Cond: (f = true)
-- -> Parallel Hash (actual rows=5000 loops=2)
-- Buckets: 16384 Batches: 1 Memory Usage: 544kB
-- -> Parallel Seq Scan on gather_append_2 (actual rows=5000 loops=2)
-- Result rows in root node should be equal to non-parallel count
-- NOTE(review): this statement and its expected output are an exact duplicate
-- of the preceding EXPLAIN block — likely an accidental copy-paste in the
-- gist; confirm with the author whether a second run was intended.
EXPLAIN (ANALYZE true, COSTS false, TIMING false, SUMMARY false, VERBOSE false, BUFFERS false)
SELECT val
FROM (
SELECT fk FROM gather_append_1 WHERE f
UNION ALL
SELECT fk FROM gather_append_1 WHERE false
) as t
LEFT OUTER JOIN gather_append_2
USING (fk)
ORDER BY val;
-- Gather Merge (actual rows=400 loops=1)
-- Workers Planned: 1
-- Workers Launched: 1
-- -> Sort (actual rows=200 loops=2)
-- Sort Key: gather_append_2.val
-- Sort Method: quicksort Memory: 25kB
-- Worker 0: Sort Method: quicksort Memory: 25kB
-- -> Parallel Hash Left Join (actual rows=200 loops=2)
-- Hash Cond: (gather_append_1.fk = gather_append_2.fk)
-- -> Index Scan using gather_append_1_ix on gather_append_1 (actual rows=40 loops=2)
-- Index Cond: (f = true)
-- -> Parallel Hash (actual rows=5000 loops=2)
-- Buckets: 16384 Batches: 1 Memory Usage: 544kB
-- -> Parallel Seq Scan on gather_append_2 (actual rows=5000 loops=2)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment