Diffstat (limited to 'src/test/regress')
-rw-r--r--  src/test/regress/expected/fast_default.out | 18
-rw-r--r--  src/test/regress/expected/generated.out | 3
-rw-r--r--  src/test/regress/expected/horology.out | 4
-rw-r--r--  src/test/regress/expected/indexing.out | 39
-rw-r--r--  src/test/regress/expected/inherit.out | 17
-rw-r--r--  src/test/regress/expected/int8.out | 2
-rw-r--r--  src/test/regress/expected/interval.out | 13
-rw-r--r--  src/test/regress/expected/join.out | 26
-rw-r--r--  src/test/regress/expected/matview.out | 16
-rw-r--r--  src/test/regress/expected/merge.out | 91
-rw-r--r--  src/test/regress/expected/opr_sanity.out | 2
-rw-r--r--  src/test/regress/expected/partition_join.out | 167
-rw-r--r--  src/test/regress/expected/triggers.out | 45
-rw-r--r--  src/test/regress/expected/tsdicts.out | 34
-rw-r--r--  src/test/regress/pg_regress.c | 2
-rw-r--r--  src/test/regress/sql/fast_default.sql | 11
-rw-r--r--  src/test/regress/sql/generated.sql | 3
-rw-r--r--  src/test/regress/sql/horology.sql | 2
-rw-r--r--  src/test/regress/sql/indexing.sql | 20
-rw-r--r--  src/test/regress/sql/inherit.sql | 9
-rw-r--r--  src/test/regress/sql/int8.sql | 1
-rw-r--r--  src/test/regress/sql/interval.sql | 8
-rw-r--r--  src/test/regress/sql/join.sql | 12
-rw-r--r--  src/test/regress/sql/matview.sql | 17
-rw-r--r--  src/test/regress/sql/merge.sql | 64
-rw-r--r--  src/test/regress/sql/opr_sanity.sql | 2
-rw-r--r--  src/test/regress/sql/partition_join.sql | 48
-rw-r--r--  src/test/regress/sql/triggers.sql | 36
-rw-r--r--  src/test/regress/sql/tsdicts.sql | 30
29 files changed, 739 insertions(+), 3 deletions(-)
diff --git a/src/test/regress/expected/fast_default.out b/src/test/regress/expected/fast_default.out
index 91f2571..59365da 100644
--- a/src/test/regress/expected/fast_default.out
+++ b/src/test/regress/expected/fast_default.out
@@ -272,7 +272,25 @@ SELECT comp();
Rewritten
(1 row)
+-- check that we notice insertion of a volatile default argument
+CREATE FUNCTION foolme(timestamptz DEFAULT clock_timestamp())
+ RETURNS timestamptz
+ IMMUTABLE AS 'select $1' LANGUAGE sql;
+ALTER TABLE T ADD COLUMN c3 timestamptz DEFAULT foolme();
+NOTICE: rewriting table t for reason 2
+SELECT attname, atthasmissing, attmissingval FROM pg_attribute
+ WHERE attrelid = 't'::regclass AND attnum > 0
+ ORDER BY attnum;
+ attname | atthasmissing | attmissingval
+---------+---------------+---------------
+ pk | f |
+ c1 | f |
+ c2 | f |
+ c3 | f |
+(4 rows)
+
DROP TABLE T;
+DROP FUNCTION foolme(timestamptz);
-- Simple querie
CREATE TABLE T (pk INT NOT NULL PRIMARY KEY);
SELECT set('t');
diff --git a/src/test/regress/expected/generated.out b/src/test/regress/expected/generated.out
index 7c3c7b2..1020848 100644
--- a/src/test/regress/expected/generated.out
+++ b/src/test/regress/expected/generated.out
@@ -61,6 +61,9 @@ LINE 1: ..._3 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (c * 2) STO...
-- generation expression must be immutable
CREATE TABLE gtest_err_4 (a int PRIMARY KEY, b double precision GENERATED ALWAYS AS (random()) STORED);
ERROR: generation expression is not immutable
+-- ... but be sure that the immutability test is accurate
+CREATE TABLE gtest2 (a int, b text GENERATED ALWAYS AS (a || ' sec') STORED);
+DROP TABLE gtest2;
-- cannot have default/identity and generated
CREATE TABLE gtest_err_5a (a int PRIMARY KEY, b int DEFAULT 5 GENERATED ALWAYS AS (a * 2) STORED);
ERROR: both default and generation expression specified for column "b" of table "gtest_err_5a"
diff --git a/src/test/regress/expected/horology.out b/src/test/regress/expected/horology.out
index de73683..2dc9b00 100644
--- a/src/test/regress/expected/horology.out
+++ b/src/test/regress/expected/horology.out
@@ -373,6 +373,8 @@ SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days'
Sun Dec 31 00:00:00 294276
(1 row)
+SELECT timestamp without time zone '2000-01-01' - interval '2483590 days' AS "out of range";
+ERROR: timestamp out of range
SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days";
106751991 Days
------------------
@@ -633,6 +635,8 @@ SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS
Fri Dec 31 23:59:59 1999 PST
(1 row)
+SELECT timestamp with time zone '2000-01-01' - interval '2483590 days' AS "out of range";
+ERROR: timestamp out of range
SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
True
------
diff --git a/src/test/regress/expected/indexing.out b/src/test/regress/expected/indexing.out
index 368e735..e17879b 100644
--- a/src/test/regress/expected/indexing.out
+++ b/src/test/regress/expected/indexing.out
@@ -1282,6 +1282,45 @@ select tableoid::regclass, * from idxpart order by a;
(8 rows)
drop table idxpart;
+-- Test some other non-btree index types
+create table idxpart (a int, b text, c int[]) partition by range (a);
+create table idxpart1 partition of idxpart for values from (0) to (100000);
+set enable_seqscan to off;
+create index idxpart_brin on idxpart using brin(b);
+explain (costs off) select * from idxpart where b = 'abcd';
+ QUERY PLAN
+-------------------------------------------
+ Bitmap Heap Scan on idxpart1 idxpart
+ Recheck Cond: (b = 'abcd'::text)
+ -> Bitmap Index Scan on idxpart1_b_idx
+ Index Cond: (b = 'abcd'::text)
+(4 rows)
+
+drop index idxpart_brin;
+create index idxpart_spgist on idxpart using spgist(b);
+explain (costs off) select * from idxpart where b = 'abcd';
+ QUERY PLAN
+-------------------------------------------
+ Bitmap Heap Scan on idxpart1 idxpart
+ Recheck Cond: (b = 'abcd'::text)
+ -> Bitmap Index Scan on idxpart1_b_idx
+ Index Cond: (b = 'abcd'::text)
+(4 rows)
+
+drop index idxpart_spgist;
+create index idxpart_gin on idxpart using gin(c);
+explain (costs off) select * from idxpart where c @> array[42];
+ QUERY PLAN
+----------------------------------------------
+ Bitmap Heap Scan on idxpart1 idxpart
+ Recheck Cond: (c @> '{42}'::integer[])
+ -> Bitmap Index Scan on idxpart1_c_idx
+ Index Cond: (c @> '{42}'::integer[])
+(4 rows)
+
+drop index idxpart_gin;
+reset enable_seqscan;
+drop table idxpart;
-- intentionally leave some objects around
create table idxpart (a int) partition by range (a);
create table idxpart1 partition of idxpart for values from (0) to (100);
diff --git a/src/test/regress/expected/inherit.out b/src/test/regress/expected/inherit.out
index 61e1522..33d9f13 100644
--- a/src/test/regress/expected/inherit.out
+++ b/src/test/regress/expected/inherit.out
@@ -1088,6 +1088,23 @@ Inherits: inht1,
inhs1
DROP TABLE inhts;
+-- Test for adding a column to a parent table with complex inheritance
+CREATE TABLE inhta ();
+CREATE TABLE inhtb () INHERITS (inhta);
+CREATE TABLE inhtc () INHERITS (inhtb);
+CREATE TABLE inhtd () INHERITS (inhta, inhtb, inhtc);
+ALTER TABLE inhta ADD COLUMN i int;
+NOTICE: merging definition of column "i" for child "inhtd"
+NOTICE: merging definition of column "i" for child "inhtd"
+\d+ inhta
+ Table "public.inhta"
+ Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
+--------+---------+-----------+----------+---------+---------+--------------+-------------
+ i | integer | | | | plain | |
+Child tables: inhtb,
+ inhtd
+
+DROP TABLE inhta, inhtb, inhtc, inhtd;
-- Test for renaming in diamond inheritance
CREATE TABLE inht2 (x int) INHERITS (inht1);
CREATE TABLE inht3 (y int) INHERITS (inht1);
diff --git a/src/test/regress/expected/int8.out b/src/test/regress/expected/int8.out
index 1ae23cf..329f391 100644
--- a/src/test/regress/expected/int8.out
+++ b/src/test/regress/expected/int8.out
@@ -654,6 +654,8 @@ select -('-9223372036854775807'::int8);
select -('-9223372036854775808'::int8);
ERROR: bigint out of range
+select 0::int8 - '-9223372036854775808'::int8;
+ERROR: bigint out of range
select '9223372036854775800'::int8 + '9223372036854775800'::int8;
ERROR: bigint out of range
select '-9223372036854775800'::int8 + '-9223372036854775800'::int8;
diff --git a/src/test/regress/expected/interval.out b/src/test/regress/expected/interval.out
index 5350abf..f96eadd 100644
--- a/src/test/regress/expected/interval.out
+++ b/src/test/regress/expected/interval.out
@@ -357,6 +357,19 @@ SELECT * FROM INTERVAL_TBL;
@ 5 mons 12 hours
(10 rows)
+-- multiplication and division overflow test cases
+SELECT '3000000 months'::interval * 1000;
+ERROR: interval out of range
+SELECT '3000000 months'::interval / 0.001;
+ERROR: interval out of range
+SELECT '3000000 days'::interval * 1000;
+ERROR: interval out of range
+SELECT '3000000 days'::interval / 0.001;
+ERROR: interval out of range
+SELECT '1 month 2146410 days'::interval * 1000.5002;
+ERROR: interval out of range
+SELECT '4611686018427387904 usec'::interval / 0.1;
+ERROR: interval out of range
-- test avg(interval), which is somewhat fragile since people have been
-- known to change the allowed input syntax for type interval without
-- updating pg_aggregate.agginitval
diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out
index 0eb6339..867c6d2 100644
--- a/src/test/regress/expected/join.out
+++ b/src/test/regress/expected/join.out
@@ -5803,6 +5803,32 @@ select * from
Output: (COALESCE((COALESCE(b.q2, '42'::bigint)), d.q2))
(24 rows)
+-- another case requiring nested PlaceHolderVars
+explain (verbose, costs off)
+select * from
+ (select 0 as val0) as ss0
+ left join (select 1 as val) as ss1 on true
+ left join lateral (select ss1.val as val_filtered where false) as ss2 on true;
+ QUERY PLAN
+--------------------------------
+ Nested Loop Left Join
+ Output: 0, (1), ((1))
+ -> Result
+ Output: 1
+ -> Result
+ Output: (1)
+ One-Time Filter: false
+(7 rows)
+
+select * from
+ (select 0 as val0) as ss0
+ left join (select 1 as val) as ss1 on true
+ left join lateral (select ss1.val as val_filtered where false) as ss2 on true;
+ val0 | val | val_filtered
+------+-----+--------------
+ 0 | 1 |
+(1 row)
+
-- case that breaks the old ph_may_need optimization
explain (verbose, costs off)
select c.*,a.*,ss1.q1,ss2.q1,ss3.* from
diff --git a/src/test/regress/expected/matview.out b/src/test/regress/expected/matview.out
index c109d97..e811e1a 100644
--- a/src/test/regress/expected/matview.out
+++ b/src/test/regress/expected/matview.out
@@ -572,6 +572,22 @@ REFRESH MATERIALIZED VIEW mvtest_mv_foo;
REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
DROP OWNED BY regress_user_mvtest CASCADE;
DROP ROLE regress_user_mvtest;
+-- Concurrent refresh requires a unique index on the materialized
+-- view. Test what happens if it's dropped during the refresh.
+CREATE OR REPLACE FUNCTION mvtest_drop_the_index()
+ RETURNS bool AS $$
+BEGIN
+ EXECUTE 'DROP INDEX IF EXISTS mvtest_drop_idx';
+ RETURN true;
+END;
+$$ LANGUAGE plpgsql;
+CREATE MATERIALIZED VIEW drop_idx_matview AS
+ SELECT 1 as i WHERE mvtest_drop_the_index();
+NOTICE: index "mvtest_drop_idx" does not exist, skipping
+CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i);
+REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview;
+ERROR: could not find suitable unique index on materialized view
+DROP MATERIALIZED VIEW drop_idx_matview; -- clean up
-- make sure that create WITH NO DATA works via SPI
BEGIN;
CREATE FUNCTION mvtest_func()
diff --git a/src/test/regress/expected/merge.out b/src/test/regress/expected/merge.out
index 15c6632..edc0043 100644
--- a/src/test/regress/expected/merge.out
+++ b/src/test/regress/expected/merge.out
@@ -1662,6 +1662,10 @@ ALTER TABLE pa_target ATTACH PARTITION part4 DEFAULT;
INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2) AS id;
-- try simple MERGE
BEGIN;
+DO $$
+DECLARE
+ result integer;
+BEGIN
MERGE INTO pa_target t
USING pa_source s
ON t.tid = s.sid
@@ -1669,6 +1673,11 @@ MERGE INTO pa_target t
UPDATE SET balance = balance + delta, val = val || ' updated by merge'
WHEN NOT MATCHED THEN
INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
+NOTICE: ROW_COUNT = 14
SELECT * FROM pa_target ORDER BY tid;
tid | balance | val
-----+---------+--------------------------
@@ -1725,6 +1734,10 @@ SELECT * FROM pa_target ORDER BY tid;
ROLLBACK;
-- try updating the partition key column
BEGIN;
+DO $$
+DECLARE
+ result integer;
+BEGIN
MERGE INTO pa_target t
USING pa_source s
ON t.tid = s.sid
@@ -1732,6 +1745,11 @@ MERGE INTO pa_target t
UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
WHEN NOT MATCHED THEN
INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
+NOTICE: ROW_COUNT = 14
SELECT * FROM pa_target ORDER BY tid;
tid | balance | val
-----+---------+--------------------------
@@ -1752,6 +1770,79 @@ SELECT * FROM pa_target ORDER BY tid;
(14 rows)
ROLLBACK;
+-- as above, but blocked by BEFORE DELETE ROW trigger
+BEGIN;
+CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
+ $$ BEGIN RETURN NULL; END; $$;
+CREATE TRIGGER del_trig BEFORE DELETE ON pa_target
+ FOR EACH ROW EXECUTE PROCEDURE trig_fn();
+DO $$
+DECLARE
+ result integer;
+BEGIN
+MERGE INTO pa_target t
+ USING pa_source s
+ ON t.tid = s.sid
+ WHEN MATCHED THEN
+ UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
+NOTICE: ROW_COUNT = 10
+SELECT * FROM pa_target ORDER BY tid;
+ tid | balance | val
+-----+---------+--------------------------
+ 1 | 100 | initial
+ 2 | 20 | inserted by merge
+ 3 | 300 | initial
+ 4 | 40 | inserted by merge
+ 6 | 550 | initial updated by merge
+ 6 | 60 | inserted by merge
+ 7 | 700 | initial
+ 8 | 80 | inserted by merge
+ 9 | 900 | initial
+ 10 | 100 | inserted by merge
+ 12 | 1210 | initial updated by merge
+ 12 | 120 | inserted by merge
+ 14 | 1430 | initial updated by merge
+ 14 | 140 | inserted by merge
+(14 rows)
+
+ROLLBACK;
+-- as above, but blocked by BEFORE INSERT ROW trigger
+BEGIN;
+CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
+ $$ BEGIN RETURN NULL; END; $$;
+CREATE TRIGGER ins_trig BEFORE INSERT ON pa_target
+ FOR EACH ROW EXECUTE PROCEDURE trig_fn();
+DO $$
+DECLARE
+ result integer;
+BEGIN
+MERGE INTO pa_target t
+ USING pa_source s
+ ON t.tid = s.sid
+ WHEN MATCHED THEN
+ UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
+NOTICE: ROW_COUNT = 3
+SELECT * FROM pa_target ORDER BY tid;
+ tid | balance | val
+-----+---------+--------------------------
+ 6 | 550 | initial updated by merge
+ 12 | 1210 | initial updated by merge
+ 14 | 1430 | initial updated by merge
+(3 rows)
+
+ROLLBACK;
-- test RLS enforcement
BEGIN;
ALTER TABLE pa_target ENABLE ROW LEVEL SECURITY;
diff --git a/src/test/regress/expected/opr_sanity.out b/src/test/regress/expected/opr_sanity.out
index f27217c..ba5875d 100644
--- a/src/test/regress/expected/opr_sanity.out
+++ b/src/test/regress/expected/opr_sanity.out
@@ -1887,7 +1887,7 @@ WHERE p1.oid = a1.amhandler AND a1.amtype = 'i' AND
-- Check for table amhandler functions with the wrong signature
SELECT a1.oid, a1.amname, p1.oid, p1.proname
FROM pg_am AS a1, pg_proc AS p1
-WHERE p1.oid = a1.amhandler AND a1.amtype = 's' AND
+WHERE p1.oid = a1.amhandler AND a1.amtype = 't' AND
(p1.prorettype != 'table_am_handler'::regtype
OR p1.proretset
OR p1.pronargs != 1
diff --git a/src/test/regress/expected/partition_join.out b/src/test/regress/expected/partition_join.out
index bb5b7c4..8b179fa 100644
--- a/src/test/regress/expected/partition_join.out
+++ b/src/test/regress/expected/partition_join.out
@@ -459,6 +459,99 @@ SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL
550 | |
(12 rows)
+SET max_parallel_workers_per_gather = 0;
+-- If there are lateral references to the other relation in sample scan,
+-- we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT * FROM prt1 t1 JOIN LATERAL
+ (SELECT * FROM prt1 t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s
+ ON t1.a = s.a;
+ QUERY PLAN
+---------------------------------------------------------
+ Nested Loop
+ -> Append
+ -> Seq Scan on prt1_p1 t1_1
+ -> Seq Scan on prt1_p2 t1_2
+ -> Seq Scan on prt1_p3 t1_3
+ -> Append
+ -> Sample Scan on prt1_p1 t2_1
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: (t1.a = a)
+ -> Sample Scan on prt1_p2 t2_2
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: (t1.a = a)
+ -> Sample Scan on prt1_p3 t2_3
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: (t1.a = a)
+(15 rows)
+
+-- If there are lateral references to the other relation in scan's restriction
+-- clauses, we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.a;
+ QUERY PLAN
+---------------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Append
+ -> Seq Scan on prt1_p1 t1_1
+ -> Seq Scan on prt1_p2 t1_2
+ -> Seq Scan on prt1_p3 t1_3
+ -> Append
+ -> Index Scan using iprt2_p1_b on prt2_p1 t2_1
+ Index Cond: (b = t1.a)
+ Filter: (t1.b = a)
+ -> Index Scan using iprt2_p2_b on prt2_p2 t2_2
+ Index Cond: (b = t1.a)
+ Filter: (t1.b = a)
+ -> Index Scan using iprt2_p3_b on prt2_p3 t2_3
+ Index Cond: (b = t1.a)
+ Filter: (t1.b = a)
+(16 rows)
+
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.a;
+ count
+-------
+ 100
+(1 row)
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.b;
+ QUERY PLAN
+--------------------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Append
+ -> Seq Scan on prt1_p1 t1_1
+ -> Seq Scan on prt1_p2 t1_2
+ -> Seq Scan on prt1_p3 t1_3
+ -> Append
+ -> Index Only Scan using iprt2_p1_b on prt2_p1 t2_1
+ Index Cond: (b = t1.a)
+ Filter: (b = t1.b)
+ -> Index Only Scan using iprt2_p2_b on prt2_p2 t2_2
+ Index Cond: (b = t1.a)
+ Filter: (b = t1.b)
+ -> Index Only Scan using iprt2_p3_b on prt2_p3 t2_3
+ Index Cond: (b = t1.a)
+ Filter: (b = t1.b)
+(16 rows)
+
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.b;
+ count
+-------
+ 5
+(1 row)
+
+RESET max_parallel_workers_per_gather;
-- bug with inadequate sort key representation
SET enable_partitionwise_aggregate TO true;
SET enable_hashjoin TO false;
@@ -1897,6 +1990,80 @@ SELECT * FROM prt1_l t1 LEFT JOIN LATERAL
550 | 0 | 0002 | | | | |
(12 rows)
+SET max_parallel_workers_per_gather = 0;
+-- If there are lateral references to the other relation in sample scan,
+-- we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT * FROM prt1_l t1 JOIN LATERAL
+ (SELECT * FROM prt1_l t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s
+ ON t1.a = s.a AND t1.b = s.b AND t1.c = s.c;
+ QUERY PLAN
+----------------------------------------------------------------------------------
+ Nested Loop
+ -> Append
+ -> Seq Scan on prt1_l_p1 t1_1
+ -> Seq Scan on prt1_l_p2_p1 t1_2
+ -> Seq Scan on prt1_l_p2_p2 t1_3
+ -> Seq Scan on prt1_l_p3_p1 t1_4
+ -> Seq Scan on prt1_l_p3_p2 t1_5
+ -> Append
+ -> Sample Scan on prt1_l_p1 t2_1
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text))
+ -> Sample Scan on prt1_l_p2_p1 t2_2
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text))
+ -> Sample Scan on prt1_l_p2_p2 t2_3
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text))
+ -> Sample Scan on prt1_l_p3_p1 t2_4
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text))
+ -> Sample Scan on prt1_l_p3_p2 t2_5
+ Sampling: system (t1.a) REPEATABLE (t1.b)
+ Filter: ((t1.a = a) AND (t1.b = b) AND ((t1.c)::text = (c)::text))
+(23 rows)
+
+-- If there are lateral references to the other relation in scan's restriction
+-- clauses, we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s
+ ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c
+ WHERE s.t1b = s.a;
+ QUERY PLAN
+-------------------------------------------------------------------------------------------------------
+ Aggregate
+ -> Nested Loop
+ -> Append
+ -> Seq Scan on prt1_l_p1 t1_1
+ -> Seq Scan on prt1_l_p2_p1 t1_2
+ -> Seq Scan on prt1_l_p2_p2 t1_3
+ -> Seq Scan on prt1_l_p3_p1 t1_4
+ -> Seq Scan on prt1_l_p3_p2 t1_5
+ -> Append
+ -> Seq Scan on prt2_l_p1 t2_1
+ Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text))
+ -> Seq Scan on prt2_l_p2_p1 t2_2
+ Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text))
+ -> Seq Scan on prt2_l_p2_p2 t2_3
+ Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text))
+ -> Seq Scan on prt2_l_p3_p1 t2_4
+ Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text))
+ -> Seq Scan on prt2_l_p3_p2 t2_5
+ Filter: ((a = t1.b) AND (t1.a = b) AND (t1.b = a) AND ((t1.c)::text = (c)::text))
+(19 rows)
+
+SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s
+ ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c
+ WHERE s.t1b = s.a;
+ count
+-------
+ 100
+(1 row)
+
+RESET max_parallel_workers_per_gather;
-- join with one side empty
EXPLAIN (COSTS OFF)
SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.b = t2.a AND t1.c = t2.c;
diff --git a/src/test/regress/expected/triggers.out b/src/test/regress/expected/triggers.out
index cc15f5c..723e8b7 100644
--- a/src/test/regress/expected/triggers.out
+++ b/src/test/regress/expected/triggers.out
@@ -2309,6 +2309,51 @@ NOTICE: trigger zzz on parted_trig_1_1 AFTER INSERT for ROW
NOTICE: trigger bbb on parted_trig_2 AFTER INSERT for ROW
NOTICE: trigger zzz on parted_trig_2 AFTER INSERT for ROW
drop table parted_trig;
+-- Verify that the correct triggers fire for cross-partition updates
+create table parted_trig (a int) partition by list (a);
+create table parted_trig1 partition of parted_trig for values in (1);
+create table parted_trig2 partition of parted_trig for values in (2);
+insert into parted_trig values (1);
+create or replace function trigger_notice() returns trigger as $$
+ begin
+ raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL;
+ if TG_LEVEL = 'ROW' then
+ if TG_OP = 'DELETE' then
+ return OLD;
+ else
+ return NEW;
+ end if;
+ end if;
+ return null;
+ end;
+ $$ language plpgsql;
+create trigger parted_trig_before_stmt before insert or update or delete on parted_trig
+ for each statement execute procedure trigger_notice();
+create trigger parted_trig_before_row before insert or update or delete on parted_trig
+ for each row execute procedure trigger_notice();
+create trigger parted_trig_after_row after insert or update or delete on parted_trig
+ for each row execute procedure trigger_notice();
+create trigger parted_trig_after_stmt after insert or update or delete on parted_trig
+ for each statement execute procedure trigger_notice();
+update parted_trig set a = 2 where a = 1;
+NOTICE: trigger parted_trig_before_stmt on parted_trig BEFORE UPDATE for STATEMENT
+NOTICE: trigger parted_trig_before_row on parted_trig1 BEFORE UPDATE for ROW
+NOTICE: trigger parted_trig_before_row on parted_trig1 BEFORE DELETE for ROW
+NOTICE: trigger parted_trig_before_row on parted_trig2 BEFORE INSERT for ROW
+NOTICE: trigger parted_trig_after_row on parted_trig1 AFTER DELETE for ROW
+NOTICE: trigger parted_trig_after_row on parted_trig2 AFTER INSERT for ROW
+NOTICE: trigger parted_trig_after_stmt on parted_trig AFTER UPDATE for STATEMENT
+-- update action in merge should behave the same
+merge into parted_trig using (select 1) as ss on true
+ when matched and a = 2 then update set a = 1;
+NOTICE: trigger parted_trig_before_stmt on parted_trig BEFORE UPDATE for STATEMENT
+NOTICE: trigger parted_trig_before_row on parted_trig2 BEFORE UPDATE for ROW
+NOTICE: trigger parted_trig_before_row on parted_trig2 BEFORE DELETE for ROW
+NOTICE: trigger parted_trig_before_row on parted_trig1 BEFORE INSERT for ROW
+NOTICE: trigger parted_trig_after_row on parted_trig2 AFTER DELETE for ROW
+NOTICE: trigger parted_trig_after_row on parted_trig1 AFTER INSERT for ROW
+NOTICE: trigger parted_trig_after_stmt on parted_trig AFTER UPDATE for STATEMENT
+drop table parted_trig;
-- Verify propagation of trigger arguments to partitions
create table parted_trig (a int) partition by list (a);
create table parted_trig1 partition of parted_trig for values in (1);
diff --git a/src/test/regress/expected/tsdicts.out b/src/test/regress/expected/tsdicts.out
index c804293..4eff85d 100644
--- a/src/test/regress/expected/tsdicts.out
+++ b/src/test/regress/expected/tsdicts.out
@@ -687,3 +687,37 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
"AffFile" = ispell_sample
);
ERROR: unrecognized Ispell parameter: "DictFile"
+-- Test grammar for configurations
+CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
+-- Overriden mapping change with duplicated tokens.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ ALTER MAPPING FOR word, word WITH ispell;
+-- Not a token supported by the configuration's parser, fails.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING FOR not_a_token, not_a_token;
+ERROR: token type "not_a_token" does not exist
+-- Not a token supported by the configuration's parser, fails even
+-- with IF EXISTS.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING IF EXISTS FOR not_a_token, not_a_token;
+ERROR: token type "not_a_token" does not exist
+-- Token supported by the configuration's parser, succeeds.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING FOR word, word;
+-- No mapping for token supported by the configuration's parser, fails.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING FOR word;
+ERROR: mapping for token type "word" does not exist
+-- Token supported by the configuration's parser, cannot be found,
+-- succeeds with IF EXISTS.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING IF EXISTS FOR word, word;
+NOTICE: mapping for token type "word" does not exist, skipping
+-- Re-add mapping, with duplicated tokens supported by the parser.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ ADD MAPPING FOR word, word WITH ispell;
+-- Not a token supported by the configuration's parser, fails.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ ADD MAPPING FOR not_a_token WITH ispell;
+ERROR: token type "not_a_token" does not exist
+DROP TEXT SEARCH CONFIGURATION dummy_tst;
diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c
index d7d5f4a..e275871 100644
--- a/src/test/regress/pg_regress.c
+++ b/src/test/regress/pg_regress.c
@@ -1126,7 +1126,7 @@ spawn_process(const char *cmdline)
comspec = "CMD";
memset(&pi, 0, sizeof(pi));
- cmdline2 = psprintf("\"%s\" /c \"%s\"", comspec, cmdline);
+ cmdline2 = psprintf("\"%s\" /d /c \"%s\"", comspec, cmdline);
if ((restrictedToken =
CreateRestrictedProcess(cmdline2, &pi)) == 0)
diff --git a/src/test/regress/sql/fast_default.sql b/src/test/regress/sql/fast_default.sql
index 16a3b7c..dc9df78 100644
--- a/src/test/regress/sql/fast_default.sql
+++ b/src/test/regress/sql/fast_default.sql
@@ -256,7 +256,18 @@ ALTER TABLE T ADD COLUMN c2 TIMESTAMP DEFAULT clock_timestamp();
SELECT comp();
+-- check that we notice insertion of a volatile default argument
+CREATE FUNCTION foolme(timestamptz DEFAULT clock_timestamp())
+ RETURNS timestamptz
+ IMMUTABLE AS 'select $1' LANGUAGE sql;
+ALTER TABLE T ADD COLUMN c3 timestamptz DEFAULT foolme();
+
+SELECT attname, atthasmissing, attmissingval FROM pg_attribute
+ WHERE attrelid = 't'::regclass AND attnum > 0
+ ORDER BY attnum;
+
DROP TABLE T;
+DROP FUNCTION foolme(timestamptz);
-- Simple querie
CREATE TABLE T (pk INT NOT NULL PRIMARY KEY);
diff --git a/src/test/regress/sql/generated.sql b/src/test/regress/sql/generated.sql
index 8d25161..53bccf9 100644
--- a/src/test/regress/sql/generated.sql
+++ b/src/test/regress/sql/generated.sql
@@ -26,6 +26,9 @@ CREATE TABLE gtest_err_3 (a int PRIMARY KEY, b int GENERATED ALWAYS AS (c * 2) S
-- generation expression must be immutable
CREATE TABLE gtest_err_4 (a int PRIMARY KEY, b double precision GENERATED ALWAYS AS (random()) STORED);
+-- ... but be sure that the immutability test is accurate
+CREATE TABLE gtest2 (a int, b text GENERATED ALWAYS AS (a || ' sec') STORED);
+DROP TABLE gtest2;
-- cannot have default/identity and generated
CREATE TABLE gtest_err_5a (a int PRIMARY KEY, b int DEFAULT 5 GENERATED ALWAYS AS (a * 2) STORED);
diff --git a/src/test/regress/sql/horology.sql b/src/test/regress/sql/horology.sql
index 2724a2b..4065348 100644
--- a/src/test/regress/sql/horology.sql
+++ b/src/test/regress/sql/horology.sql
@@ -86,6 +86,7 @@ SELECT timestamp without time zone '1999-12-01' + interval '1 month - 1 second'
SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '106000000 days' AS "Feb 23, 285506";
SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '107000000 days' AS "Jan 20, 288244";
SELECT timestamp without time zone 'Jan 1, 4713 BC' + interval '109203489 days' AS "Dec 31, 294276";
+SELECT timestamp without time zone '2000-01-01' - interval '2483590 days' AS "out of range";
SELECT timestamp without time zone '12/31/294276' - timestamp without time zone '12/23/1999' AS "106751991 Days";
-- Shorthand values
@@ -117,6 +118,7 @@ SELECT timestamp with time zone '1996-03-01' - interval '1 second' AS "Feb 29";
SELECT timestamp with time zone '1999-03-01' - interval '1 second' AS "Feb 28";
SELECT timestamp with time zone '2000-03-01' - interval '1 second' AS "Feb 29";
SELECT timestamp with time zone '1999-12-01' + interval '1 month - 1 second' AS "Dec 31";
+SELECT timestamp with time zone '2000-01-01' - interval '2483590 days' AS "out of range";
SELECT (timestamp with time zone 'today' = (timestamp with time zone 'yesterday' + interval '1 day')) as "True";
SELECT (timestamp with time zone 'today' = (timestamp with time zone 'tomorrow' - interval '1 day')) as "True";
diff --git a/src/test/regress/sql/indexing.sql b/src/test/regress/sql/indexing.sql
index 6f60d1d..a48a317 100644
--- a/src/test/regress/sql/indexing.sql
+++ b/src/test/regress/sql/indexing.sql
@@ -668,6 +668,26 @@ insert into idxpart values (857142, 'six');
select tableoid::regclass, * from idxpart order by a;
drop table idxpart;
+-- Test some other non-btree index types
+create table idxpart (a int, b text, c int[]) partition by range (a);
+create table idxpart1 partition of idxpart for values from (0) to (100000);
+set enable_seqscan to off;
+
+create index idxpart_brin on idxpart using brin(b);
+explain (costs off) select * from idxpart where b = 'abcd';
+drop index idxpart_brin;
+
+create index idxpart_spgist on idxpart using spgist(b);
+explain (costs off) select * from idxpart where b = 'abcd';
+drop index idxpart_spgist;
+
+create index idxpart_gin on idxpart using gin(c);
+explain (costs off) select * from idxpart where c @> array[42];
+drop index idxpart_gin;
+
+reset enable_seqscan;
+drop table idxpart;
+
-- intentionally leave some objects around
create table idxpart (a int) partition by range (a);
create table idxpart1 partition of idxpart for values from (0) to (100);
diff --git a/src/test/regress/sql/inherit.sql b/src/test/regress/sql/inherit.sql
index 76ea197..bd90834 100644
--- a/src/test/regress/sql/inherit.sql
+++ b/src/test/regress/sql/inherit.sql
@@ -372,6 +372,15 @@ ALTER TABLE inhts RENAME d TO dd;
DROP TABLE inhts;
+-- Test for adding a column to a parent table with complex inheritance
+CREATE TABLE inhta ();
+CREATE TABLE inhtb () INHERITS (inhta);
+CREATE TABLE inhtc () INHERITS (inhtb);
+CREATE TABLE inhtd () INHERITS (inhta, inhtb, inhtc);
+ALTER TABLE inhta ADD COLUMN i int;
+\d+ inhta
+DROP TABLE inhta, inhtb, inhtc, inhtd;
+
-- Test for renaming in diamond inheritance
CREATE TABLE inht2 (x int) INHERITS (inht1);
CREATE TABLE inht3 (y int) INHERITS (inht1);
diff --git a/src/test/regress/sql/int8.sql b/src/test/regress/sql/int8.sql
index 38b7719..8a3d537 100644
--- a/src/test/regress/sql/int8.sql
+++ b/src/test/regress/sql/int8.sql
@@ -126,6 +126,7 @@ select '9223372036854775808'::int8;
select -('-9223372036854775807'::int8);
select -('-9223372036854775808'::int8);
+select 0::int8 - '-9223372036854775808'::int8;
select '9223372036854775800'::int8 + '9223372036854775800'::int8;
select '-9223372036854775800'::int8 + '-9223372036854775800'::int8;
diff --git a/src/test/regress/sql/interval.sql b/src/test/regress/sql/interval.sql
index 5b4944c..2a3dd0c 100644
--- a/src/test/regress/sql/interval.sql
+++ b/src/test/regress/sql/interval.sql
@@ -129,6 +129,14 @@ SET IntervalStyle to postgres_verbose;
SELECT * FROM INTERVAL_TBL;
+-- multiplication and division overflow test cases
+SELECT '3000000 months'::interval * 1000;
+SELECT '3000000 months'::interval / 0.001;
+SELECT '3000000 days'::interval * 1000;
+SELECT '3000000 days'::interval / 0.001;
+SELECT '1 month 2146410 days'::interval * 1000.5002;
+SELECT '4611686018427387904 usec'::interval / 0.1;
+
-- test avg(interval), which is somewhat fragile since people have been
-- known to change the allowed input syntax for type interval without
-- updating pg_aggregate.agginitval
diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql
index 5fdacce..1113e98 100644
--- a/src/test/regress/sql/join.sql
+++ b/src/test/regress/sql/join.sql
@@ -1978,6 +1978,18 @@ select * from
) on c.q2 = ss2.q1,
lateral (select ss2.y offset 0) ss3;
+-- another case requiring nested PlaceHolderVars
+explain (verbose, costs off)
+select * from
+ (select 0 as val0) as ss0
+ left join (select 1 as val) as ss1 on true
+ left join lateral (select ss1.val as val_filtered where false) as ss2 on true;
+
+select * from
+ (select 0 as val0) as ss0
+ left join (select 1 as val) as ss1 on true
+ left join lateral (select ss1.val as val_filtered where false) as ss2 on true;
+
-- case that breaks the old ph_may_need optimization
explain (verbose, costs off)
select c.*,a.*,ss1.q1,ss2.q1,ss3.* from
diff --git a/src/test/regress/sql/matview.sql b/src/test/regress/sql/matview.sql
index 68b9ccf..543e0a6 100644
--- a/src/test/regress/sql/matview.sql
+++ b/src/test/regress/sql/matview.sql
@@ -231,6 +231,23 @@ REFRESH MATERIALIZED VIEW CONCURRENTLY mvtest_mv_foo;
DROP OWNED BY regress_user_mvtest CASCADE;
DROP ROLE regress_user_mvtest;
+-- Concurrent refresh requires a unique index on the materialized
+-- view. Test what happens if it's dropped during the refresh.
+CREATE OR REPLACE FUNCTION mvtest_drop_the_index()
+ RETURNS bool AS $$
+BEGIN
+ EXECUTE 'DROP INDEX IF EXISTS mvtest_drop_idx';
+ RETURN true;
+END;
+$$ LANGUAGE plpgsql;
+
+CREATE MATERIALIZED VIEW drop_idx_matview AS
+ SELECT 1 as i WHERE mvtest_drop_the_index();
+
+CREATE UNIQUE INDEX mvtest_drop_idx ON drop_idx_matview (i);
+REFRESH MATERIALIZED VIEW CONCURRENTLY drop_idx_matview;
+DROP MATERIALIZED VIEW drop_idx_matview; -- clean up
+
-- make sure that create WITH NO DATA works via SPI
BEGIN;
CREATE FUNCTION mvtest_func()
diff --git a/src/test/regress/sql/merge.sql b/src/test/regress/sql/merge.sql
index 4cf6db9..66cb75a 100644
--- a/src/test/regress/sql/merge.sql
+++ b/src/test/regress/sql/merge.sql
@@ -1060,6 +1060,10 @@ INSERT INTO pa_target SELECT id, id * 100, 'initial' FROM generate_series(1,14,2
-- try simple MERGE
BEGIN;
+DO $$
+DECLARE
+ result integer;
+BEGIN
MERGE INTO pa_target t
USING pa_source s
ON t.tid = s.sid
@@ -1067,6 +1071,10 @@ MERGE INTO pa_target t
UPDATE SET balance = balance + delta, val = val || ' updated by merge'
WHEN NOT MATCHED THEN
INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
SELECT * FROM pa_target ORDER BY tid;
ROLLBACK;
@@ -1085,6 +1093,34 @@ ROLLBACK;
-- try updating the partition key column
BEGIN;
+DO $$
+DECLARE
+ result integer;
+BEGIN
+MERGE INTO pa_target t
+ USING pa_source s
+ ON t.tid = s.sid
+ WHEN MATCHED THEN
+ UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
+SELECT * FROM pa_target ORDER BY tid;
+ROLLBACK;
+
+-- as above, but blocked by BEFORE DELETE ROW trigger
+BEGIN;
+CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
+ $$ BEGIN RETURN NULL; END; $$;
+CREATE TRIGGER del_trig BEFORE DELETE ON pa_target
+ FOR EACH ROW EXECUTE PROCEDURE trig_fn();
+DO $$
+DECLARE
+ result integer;
+BEGIN
MERGE INTO pa_target t
USING pa_source s
ON t.tid = s.sid
@@ -1092,6 +1128,34 @@ MERGE INTO pa_target t
UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
WHEN NOT MATCHED THEN
INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
+SELECT * FROM pa_target ORDER BY tid;
+ROLLBACK;
+
+-- as above, but blocked by BEFORE INSERT ROW trigger
+BEGIN;
+CREATE FUNCTION trig_fn() RETURNS trigger LANGUAGE plpgsql AS
+ $$ BEGIN RETURN NULL; END; $$;
+CREATE TRIGGER ins_trig BEFORE INSERT ON pa_target
+ FOR EACH ROW EXECUTE PROCEDURE trig_fn();
+DO $$
+DECLARE
+ result integer;
+BEGIN
+MERGE INTO pa_target t
+ USING pa_source s
+ ON t.tid = s.sid
+ WHEN MATCHED THEN
+ UPDATE SET tid = tid + 1, balance = balance + delta, val = val || ' updated by merge'
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (sid, delta, 'inserted by merge');
+GET DIAGNOSTICS result := ROW_COUNT;
+RAISE NOTICE 'ROW_COUNT = %', result;
+END;
+$$;
SELECT * FROM pa_target ORDER BY tid;
ROLLBACK;
diff --git a/src/test/regress/sql/opr_sanity.sql b/src/test/regress/sql/opr_sanity.sql
index 2b29285..0f039f2 100644
--- a/src/test/regress/sql/opr_sanity.sql
+++ b/src/test/regress/sql/opr_sanity.sql
@@ -1195,7 +1195,7 @@ WHERE p1.oid = a1.amhandler AND a1.amtype = 'i' AND
SELECT a1.oid, a1.amname, p1.oid, p1.proname
FROM pg_am AS a1, pg_proc AS p1
-WHERE p1.oid = a1.amhandler AND a1.amtype = 's' AND
+WHERE p1.oid = a1.amhandler AND a1.amtype = 't' AND
(p1.prorettype != 'table_am_handler'::regtype
OR p1.proretset
OR p1.pronargs != 1
diff --git a/src/test/regress/sql/partition_join.sql b/src/test/regress/sql/partition_join.sql
index 67f5063..04d7adb 100644
--- a/src/test/regress/sql/partition_join.sql
+++ b/src/test/regress/sql/partition_join.sql
@@ -91,6 +91,33 @@ SELECT t1.a, ss.t2a, ss.t2c FROM prt1 t1 LEFT JOIN LATERAL
(SELECT t2.a AS t2a, t3.a AS t3a, t2.b t2b, t2.c t2c, least(t1.a,t2.a,t3.a) FROM prt1 t2 JOIN prt2 t3 ON (t2.a = t3.b)) ss
ON t1.c = ss.t2c WHERE (t1.b + coalesce(ss.t2b, 0)) = 0 ORDER BY t1.a;
+SET max_parallel_workers_per_gather = 0;
+-- If there are lateral references to the other relation in sample scan,
+-- we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT * FROM prt1 t1 JOIN LATERAL
+ (SELECT * FROM prt1 t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s
+ ON t1.a = s.a;
+
+-- If there are lateral references to the other relation in scan's restriction
+-- clauses, we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.a;
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.a;
+
+EXPLAIN (COSTS OFF)
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.b;
+SELECT count(*) FROM prt1 t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2 t2) s
+ ON t1.a = s.b WHERE s.t1b = s.b;
+RESET max_parallel_workers_per_gather;
+
-- bug with inadequate sort key representation
SET enable_partitionwise_aggregate TO true;
SET enable_hashjoin TO false;
@@ -378,6 +405,27 @@ SELECT * FROM prt1_l t1 LEFT JOIN LATERAL
(SELECT t2.a AS t2a, t2.c AS t2c, t2.b AS t2b, t3.b AS t3b, least(t1.a,t2.a,t3.b) FROM prt1_l t2 JOIN prt2_l t3 ON (t2.a = t3.b AND t2.c = t3.c)) ss
ON t1.a = ss.t2a AND t1.c = ss.t2c WHERE t1.b = 0 ORDER BY t1.a;
+SET max_parallel_workers_per_gather = 0;
+-- If there are lateral references to the other relation in sample scan,
+-- we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT * FROM prt1_l t1 JOIN LATERAL
+ (SELECT * FROM prt1_l t2 TABLESAMPLE SYSTEM (t1.a) REPEATABLE(t1.b)) s
+ ON t1.a = s.a AND t1.b = s.b AND t1.c = s.c;
+
+-- If there are lateral references to the other relation in scan's restriction
+-- clauses, we cannot generate a partitionwise join.
+EXPLAIN (COSTS OFF)
+SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s
+ ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c
+ WHERE s.t1b = s.a;
+SELECT COUNT(*) FROM prt1_l t1 LEFT JOIN LATERAL
+ (SELECT t1.b AS t1b, t2.* FROM prt2_l t2) s
+ ON t1.a = s.b AND t1.b = s.a AND t1.c = s.c
+ WHERE s.t1b = s.a;
+RESET max_parallel_workers_per_gather;
+
-- join with one side empty
EXPLAIN (COSTS OFF)
SELECT t1.a, t1.c, t2.b, t2.c FROM (SELECT * FROM prt1_l WHERE a = 1 AND a = 2) t1 RIGHT JOIN prt2_l t2 ON t1.a = t2.b AND t1.b = t2.a AND t1.c = t2.c;
diff --git a/src/test/regress/sql/triggers.sql b/src/test/regress/sql/triggers.sql
index d29e98d..46795a9 100644
--- a/src/test/regress/sql/triggers.sql
+++ b/src/test/regress/sql/triggers.sql
@@ -1583,6 +1583,42 @@ create trigger qqq after insert on parted_trig_1_1 for each row execute procedur
insert into parted_trig values (50), (1500);
drop table parted_trig;
+-- Verify that the correct triggers fire for cross-partition updates
+create table parted_trig (a int) partition by list (a);
+create table parted_trig1 partition of parted_trig for values in (1);
+create table parted_trig2 partition of parted_trig for values in (2);
+insert into parted_trig values (1);
+
+create or replace function trigger_notice() returns trigger as $$
+ begin
+ raise notice 'trigger % on % % % for %', TG_NAME, TG_TABLE_NAME, TG_WHEN, TG_OP, TG_LEVEL;
+ if TG_LEVEL = 'ROW' then
+ if TG_OP = 'DELETE' then
+ return OLD;
+ else
+ return NEW;
+ end if;
+ end if;
+ return null;
+ end;
+ $$ language plpgsql;
+create trigger parted_trig_before_stmt before insert or update or delete on parted_trig
+ for each statement execute procedure trigger_notice();
+create trigger parted_trig_before_row before insert or update or delete on parted_trig
+ for each row execute procedure trigger_notice();
+create trigger parted_trig_after_row after insert or update or delete on parted_trig
+ for each row execute procedure trigger_notice();
+create trigger parted_trig_after_stmt after insert or update or delete on parted_trig
+ for each statement execute procedure trigger_notice();
+
+update parted_trig set a = 2 where a = 1;
+
+-- update action in merge should behave the same
+merge into parted_trig using (select 1) as ss on true
+ when matched and a = 2 then update set a = 1;
+
+drop table parted_trig;
+
-- Verify propagation of trigger arguments to partitions
create table parted_trig (a int) partition by list (a);
create table parted_trig1 partition of parted_trig for values in (1);
diff --git a/src/test/regress/sql/tsdicts.sql b/src/test/regress/sql/tsdicts.sql
index ddc6c7f..6a2b003 100644
--- a/src/test/regress/sql/tsdicts.sql
+++ b/src/test/regress/sql/tsdicts.sql
@@ -251,3 +251,33 @@ CREATE TEXT SEARCH DICTIONARY tsdict_case
"DictFile" = ispell_sample,
"AffFile" = ispell_sample
);
+
+-- Test grammar for configurations
+CREATE TEXT SEARCH CONFIGURATION dummy_tst (COPY=english);
+-- Overriden mapping change with duplicated tokens.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ ALTER MAPPING FOR word, word WITH ispell;
+-- Not a token supported by the configuration's parser, fails.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING FOR not_a_token, not_a_token;
+-- Not a token supported by the configuration's parser, fails even
+-- with IF EXISTS.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING IF EXISTS FOR not_a_token, not_a_token;
+-- Token supported by the configuration's parser, succeeds.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING FOR word, word;
+-- No mapping for token supported by the configuration's parser, fails.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING FOR word;
+-- Token supported by the configuration's parser, cannot be found,
+-- succeeds with IF EXISTS.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ DROP MAPPING IF EXISTS FOR word, word;
+-- Re-add mapping, with duplicated tokens supported by the parser.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ ADD MAPPING FOR word, word WITH ispell;
+-- Not a token supported by the configuration's parser, fails.
+ALTER TEXT SEARCH CONFIGURATION dummy_tst
+ ADD MAPPING FOR not_a_token WITH ispell;
+DROP TEXT SEARCH CONFIGURATION dummy_tst;