Diffstat (limited to '')
-rw-r--r--  contrib/intarray/_int_gist.c                                | 10
-rw-r--r--  contrib/intarray/data/test__int.data                        |  1
-rw-r--r--  contrib/intarray/expected/_int.out                          | 33
-rw-r--r--  contrib/intarray/sql/_int.sql                               |  5
-rw-r--r--  contrib/pageinspect/expected/hash.out                       |  5
-rw-r--r--  contrib/pageinspect/hashfuncs.c                             |  6
-rw-r--r--  contrib/pageinspect/sql/hash.sql                            |  5
-rw-r--r--  contrib/pgcrypto/pgp-decrypt.c                              |  3
-rw-r--r--  contrib/pgstattuple/expected/pgstattuple.out                | 11
-rw-r--r--  contrib/pgstattuple/pgstatindex.c                           |  5
-rw-r--r--  contrib/pgstattuple/sql/pgstattuple.sql                     |  2
-rw-r--r--  contrib/postgres_fdw/expected/postgres_fdw.out              |  7
-rw-r--r--  contrib/postgres_fdw/sql/postgres_fdw.sql                   |  6
-rw-r--r--  contrib/test_decoding/expected/catalog_change_snapshot.out  | 44
-rw-r--r--  contrib/test_decoding/specs/catalog_change_snapshot.spec    | 15
-rw-r--r--  contrib/xml2/xpath.c                                        |  1
16 files changed, 126 insertions, 33 deletions
diff --git a/contrib/intarray/_int_gist.c b/contrib/intarray/_int_gist.c
index ea79c4b..5d46b6b 100644
--- a/contrib/intarray/_int_gist.c
+++ b/contrib/intarray/_int_gist.c
@@ -296,8 +296,7 @@ g_int_decompress(PG_FUNCTION_ARGS)
ArrayType *in;
int lenin;
int *din;
- int i,
- j;
+ int i;
in = DatumGetArrayTypeP(entry->key);
@@ -341,9 +340,12 @@ g_int_decompress(PG_FUNCTION_ARGS)
dr = ARRPTR(r);
for (i = 0; i < lenin; i += 2)
- for (j = din[i]; j <= din[i + 1]; j++)
+ {
+ /* use int64 for j in case din[i + 1] is INT_MAX */
+ for (int64 j = din[i]; j <= din[i + 1]; j++)
if ((!i) || *(dr - 1) != j)
- *dr++ = j;
+ *dr++ = (int) j;
+ }
if (in != (ArrayType *) DatumGetPointer(entry->key))
pfree(in);
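
The widened loop counter matters because the decompressed ranges are inclusive: if din[i + 1] is INT_MAX, an int counter can never become greater than the bound, so the loop never terminates and the final increment overflows. A minimal standalone sketch of the hazard (not part of the patch, just an illustration of the arithmetic):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int		bound = INT_MAX;
	int64_t	iterations = 0;

	/*
	 * With an int counter, "j <= bound" stays true once bound is INT_MAX,
	 * and incrementing past INT_MAX is undefined behaviour.  A 64-bit
	 * counter can exceed the bound and stop normally.
	 */
	for (int64_t j = INT_MAX - 2; j <= bound; j++)
		iterations++;

	printf("iterations: %lld\n", (long long) iterations);	/* prints 3 */
	return 0;
}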
diff --git a/contrib/intarray/data/test__int.data b/contrib/intarray/data/test__int.data
index b3903d0..0a7fac3 100644
--- a/contrib/intarray/data/test__int.data
+++ b/contrib/intarray/data/test__int.data
@@ -6998,3 +6998,4 @@
{173,208,229}
{6,22,142,267,299}
{22,122,173,245,293}
+{1,2,101,102,201,202,2147483647}
diff --git a/contrib/intarray/expected/_int.out b/contrib/intarray/expected/_int.out
index 2f33c7e..09ab234 100644
--- a/contrib/intarray/expected/_int.out
+++ b/contrib/intarray/expected/_int.out
@@ -464,13 +464,13 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from test__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from test__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
SET enable_seqscan = off; -- not all of these would use index by default
@@ -538,13 +538,13 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from test__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from test__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
INSERT INTO test__int SELECT array(SELECT x FROM generate_series(1, 1001) x); -- should fail
@@ -620,13 +620,13 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from test__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from test__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
DROP INDEX text_idx;
@@ -700,13 +700,13 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from test__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from test__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
DROP INDEX text_idx;
@@ -774,13 +774,13 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from test__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from test__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
DROP INDEX text_idx;
@@ -848,13 +848,13 @@ SELECT count(*) from test__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from test__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from test__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
DROP INDEX text_idx;
@@ -870,9 +870,10 @@ DROP INDEX text_idx;
-- core that would reach the same codepaths.
CREATE TABLE more__int AS SELECT
-- Leave alone NULLs, empty arrays and the one row that we use to test
- -- equality
+ -- equality; also skip INT_MAX
CASE WHEN a IS NULL OR a = '{}' OR a = '{73,23,20}' THEN a ELSE
- (select array_agg(u) || array_agg(u + 1000) || array_agg(u + 2000) from (select unnest(a) u) x)
+ (select array_agg(u) || array_agg(u + 1000) || array_agg(u + 2000)
+ from unnest(a) u where u < 2000000000)
END AS a, a as b
FROM test__int;
CREATE INDEX ON more__int using gist (a gist__int_ops(numranges = 252));
@@ -939,13 +940,13 @@ SELECT count(*) from more__int WHERE a @@ '(20&23)|(50&68)';
SELECT count(*) from more__int WHERE a @@ '20 | !21';
count
-------
- 6566
+ 6567
(1 row)
SELECT count(*) from more__int WHERE a @@ '!20 & !21';
count
-------
- 6343
+ 6344
(1 row)
RESET enable_seqscan;
diff --git a/contrib/intarray/sql/_int.sql b/contrib/intarray/sql/_int.sql
index bd3e012..95eec96 100644
--- a/contrib/intarray/sql/_int.sql
+++ b/contrib/intarray/sql/_int.sql
@@ -194,9 +194,10 @@ DROP INDEX text_idx;
-- core that would reach the same codepaths.
CREATE TABLE more__int AS SELECT
-- Leave alone NULLs, empty arrays and the one row that we use to test
- -- equality
+ -- equality; also skip INT_MAX
CASE WHEN a IS NULL OR a = '{}' OR a = '{73,23,20}' THEN a ELSE
- (select array_agg(u) || array_agg(u + 1000) || array_agg(u + 2000) from (select unnest(a) u) x)
+ (select array_agg(u) || array_agg(u + 1000) || array_agg(u + 2000)
+ from unnest(a) u where u < 2000000000)
END AS a, a as b
FROM test__int;
CREATE INDEX ON more__int using gist (a gist__int_ops(numranges = 252));
diff --git a/contrib/pageinspect/expected/hash.out b/contrib/pageinspect/expected/hash.out
index 5d6a518..ea387a6 100644
--- a/contrib/pageinspect/expected/hash.out
+++ b/contrib/pageinspect/expected/hash.out
@@ -1,6 +1,8 @@
CREATE TABLE test_hash (a int, b text);
INSERT INTO test_hash VALUES (1, 'one');
CREATE INDEX test_hash_a_idx ON test_hash USING hash (a);
+CREATE TABLE test_hash_part (a int, b int) PARTITION BY RANGE (a);
+CREATE INDEX test_hash_part_idx ON test_hash_part USING hash(b);
\x
SELECT hash_page_type(get_raw_page('test_hash_a_idx', 0));
-[ RECORD 1 ]--+---------
@@ -44,6 +46,8 @@ SELECT * FROM hash_bitmap_info('test_hash_a_idx', 5);
ERROR: invalid overflow block number 5
SELECT * FROM hash_bitmap_info('test_hash_a_idx', 6);
ERROR: block number 6 is out of range for relation "test_hash_a_idx"
+SELECT * FROM hash_bitmap_info('test_hash_part_idx', 1); -- error
+ERROR: "test_hash_part_idx" is not a hash index
SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask,
lowmask, ovflpoint, firstfree, nmaps, procid, spares, mapp FROM
hash_metapage_info(get_raw_page('test_hash_a_idx', 0));
@@ -203,3 +207,4 @@ SELECT hash_page_type(decode(repeat('00', :block_size), 'hex'));
hash_page_type | unused
DROP TABLE test_hash;
+DROP TABLE test_hash_part;
diff --git a/contrib/pageinspect/hashfuncs.c b/contrib/pageinspect/hashfuncs.c
index 69af7b9..7c0f73f 100644
--- a/contrib/pageinspect/hashfuncs.c
+++ b/contrib/pageinspect/hashfuncs.c
@@ -12,6 +12,7 @@
#include "access/hash.h"
#include "access/htup_details.h"
+#include "access/relation.h"
#include "catalog/pg_am.h"
#include "catalog/pg_type.h"
#include "funcapi.h"
@@ -27,6 +28,7 @@ PG_FUNCTION_INFO_V1(hash_page_items);
PG_FUNCTION_INFO_V1(hash_bitmap_info);
PG_FUNCTION_INFO_V1(hash_metapage_info);
+#define IS_INDEX(r) ((r)->rd_rel->relkind == RELKIND_INDEX)
#define IS_HASH(r) ((r)->rd_rel->relam == HASH_AM_OID)
/* ------------------------------------------------
@@ -417,9 +419,9 @@ hash_bitmap_info(PG_FUNCTION_ARGS)
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("must be superuser to use raw page functions")));
- indexRel = index_open(indexRelid, AccessShareLock);
+ indexRel = relation_open(indexRelid, AccessShareLock);
- if (!IS_HASH(indexRel))
+ if (!IS_INDEX(indexRel) || !IS_HASH(indexRel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is not a %s index",
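
This change and the pgstattuple one below follow the same pattern: open the relation with relation_open(), which accepts any relkind, then validate both the relkind and the access method, so a partitioned hash index (which has no storage) fails with the same clean "is not a hash index" error as a table or view. A condensed sketch of that pattern, assuming the usual PostgreSQL headers; the helper name is hypothetical:

/* Hypothetical helper showing the relation_open() + relkind/AM check. */
static Relation
open_hash_index_checked(Oid relid)
{
	Relation	rel;

	/* Unlike index_open(), relation_open() does not insist on an index. */
	rel = relation_open(relid, AccessShareLock);

	/*
	 * Reject anything that is not a plain hash index; partitioned indexes
	 * have relkind RELKIND_PARTITIONED_INDEX and no underlying storage.
	 */
	if (rel->rd_rel->relkind != RELKIND_INDEX ||
		rel->rd_rel->relam != HASH_AM_OID)
		ereport(ERROR,
				(errcode(ERRCODE_WRONG_OBJECT_TYPE),
				 errmsg("relation \"%s\" is not a hash index",
						RelationGetRelationName(rel))));

	return rel;
}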
diff --git a/contrib/pageinspect/sql/hash.sql b/contrib/pageinspect/sql/hash.sql
index 320fb9f..e4b9e97 100644
--- a/contrib/pageinspect/sql/hash.sql
+++ b/contrib/pageinspect/sql/hash.sql
@@ -2,6 +2,9 @@ CREATE TABLE test_hash (a int, b text);
INSERT INTO test_hash VALUES (1, 'one');
CREATE INDEX test_hash_a_idx ON test_hash USING hash (a);
+CREATE TABLE test_hash_part (a int, b int) PARTITION BY RANGE (a);
+CREATE INDEX test_hash_part_idx ON test_hash_part USING hash(b);
+
\x
SELECT hash_page_type(get_raw_page('test_hash_a_idx', 0));
@@ -21,6 +24,7 @@ SELECT * FROM hash_bitmap_info('test_hash_a_idx', 3);
SELECT * FROM hash_bitmap_info('test_hash_a_idx', 4);
SELECT * FROM hash_bitmap_info('test_hash_a_idx', 5);
SELECT * FROM hash_bitmap_info('test_hash_a_idx', 6);
+SELECT * FROM hash_bitmap_info('test_hash_part_idx', 1); -- error
SELECT magic, version, ntuples, bsize, bmsize, bmshift, maxbucket, highmask,
@@ -106,3 +110,4 @@ SELECT hash_page_stats(decode(repeat('00', :block_size), 'hex'));
SELECT hash_page_type(decode(repeat('00', :block_size), 'hex'));
DROP TABLE test_hash;
+DROP TABLE test_hash_part;
diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c
index d12dcad..e1ea5b3 100644
--- a/contrib/pgcrypto/pgp-decrypt.c
+++ b/contrib/pgcrypto/pgp-decrypt.c
@@ -250,7 +250,8 @@ prefix_init(void **priv_p, void *arg, PullFilter *src)
uint8 tmpbuf[PGP_MAX_BLOCK + 2];
len = pgp_get_cipher_block_size(ctx->cipher_algo);
- if (len > sizeof(tmpbuf))
+ /* Make sure we have space for prefix */
+ if (len > PGP_MAX_BLOCK)
return PXE_BUG;
res = pullf_read_max(src, len + 2, &buf, tmpbuf);
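
The tightened check is about two bytes of headroom: tmpbuf holds PGP_MAX_BLOCK + 2 bytes and the read that follows covers len + 2 bytes, so len itself must not exceed PGP_MAX_BLOCK. The old test only rejected len > sizeof(tmpbuf), which would still have admitted len == PGP_MAX_BLOCK + 2 and thus a read of PGP_MAX_BLOCK + 4 bytes. A small standalone illustration of the arithmetic (the block-size value below is made up for the example):

#include <stdio.h>

#define PGP_MAX_BLOCK 16		/* illustrative only; the real constant is larger */

int
main(void)
{
	unsigned char	tmpbuf[PGP_MAX_BLOCK + 2];
	int				len = PGP_MAX_BLOCK + 2;	/* largest value the old check accepted */

	/* The read that follows the check covers len + 2 bytes of tmpbuf. */
	printf("buffer holds %zu bytes, old check allowed a %d-byte read\n",
		   sizeof(tmpbuf), len + 2);

	/* New check: len > PGP_MAX_BLOCK guarantees len + 2 <= sizeof(tmpbuf). */
	return 0;
}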
diff --git a/contrib/pgstattuple/expected/pgstattuple.out b/contrib/pgstattuple/expected/pgstattuple.out
index e4ac86f..283856e 100644
--- a/contrib/pgstattuple/expected/pgstattuple.out
+++ b/contrib/pgstattuple/expected/pgstattuple.out
@@ -153,6 +153,7 @@ ERROR: relation "test_hashidx" is not a GIN index
-- check that using any of these functions with unsupported relations will fail
create table test_partitioned (a int) partition by range (a);
create index test_partitioned_index on test_partitioned(a);
+create index test_partitioned_hash_index on test_partitioned using hash(a);
-- these should all fail
select pgstattuple('test_partitioned');
ERROR: cannot get tuple-level statistics for relation "test_partitioned"
@@ -171,7 +172,9 @@ ERROR: relation "test_partitioned" is not a btree index
select pgstatginindex('test_partitioned');
ERROR: relation "test_partitioned" is not a GIN index
select pgstathashindex('test_partitioned');
-ERROR: "test_partitioned" is not an index
+ERROR: relation "test_partitioned" is not a hash index
+select pgstathashindex('test_partitioned_hash_index');
+ERROR: relation "test_partitioned_hash_index" is not a hash index
create view test_view as select 1;
-- these should all fail
select pgstattuple('test_view');
@@ -188,7 +191,7 @@ ERROR: relation "test_view" is not a btree index
select pgstatginindex('test_view');
ERROR: relation "test_view" is not a GIN index
select pgstathashindex('test_view');
-ERROR: "test_view" is not an index
+ERROR: relation "test_view" is not a hash index
create foreign data wrapper dummy;
create server dummy_server foreign data wrapper dummy;
create foreign table test_foreign_table () server dummy_server;
@@ -207,7 +210,7 @@ ERROR: relation "test_foreign_table" is not a btree index
select pgstatginindex('test_foreign_table');
ERROR: relation "test_foreign_table" is not a GIN index
select pgstathashindex('test_foreign_table');
-ERROR: "test_foreign_table" is not an index
+ERROR: relation "test_foreign_table" is not a hash index
-- a partition of a partitioned table should work though
create table test_partition partition of test_partitioned for values from (1) to (100);
select pgstattuple('test_partition');
@@ -253,7 +256,7 @@ ERROR: relation "test_partition" is not a btree index
select pgstatginindex('test_partition');
ERROR: relation "test_partition" is not a GIN index
select pgstathashindex('test_partition');
-ERROR: "test_partition" is not an index
+ERROR: relation "test_partition" is not a hash index
-- an actual index of a partitioned table should work though
create index test_partition_idx on test_partition(a);
create index test_partition_hash_idx on test_partition using hash (a);
diff --git a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c
index 9bf14ea..0bd1a01 100644
--- a/contrib/pgstattuple/pgstatindex.c
+++ b/contrib/pgstattuple/pgstatindex.c
@@ -600,10 +600,9 @@ pgstathashindex(PG_FUNCTION_ARGS)
float8 free_percent;
uint64 total_space;
- rel = index_open(relid, AccessShareLock);
+ rel = relation_open(relid, AccessShareLock);
- /* index_open() checks that it's an index */
- if (!IS_HASH(rel))
+ if (!IS_INDEX(rel) || !IS_HASH(rel))
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("relation \"%s\" is not a hash index",
diff --git a/contrib/pgstattuple/sql/pgstattuple.sql b/contrib/pgstattuple/sql/pgstattuple.sql
index 5111be0..b08c31c 100644
--- a/contrib/pgstattuple/sql/pgstattuple.sql
+++ b/contrib/pgstattuple/sql/pgstattuple.sql
@@ -65,6 +65,7 @@ select pgstatginindex('test_hashidx');
-- check that using any of these functions with unsupported relations will fail
create table test_partitioned (a int) partition by range (a);
create index test_partitioned_index on test_partitioned(a);
+create index test_partitioned_hash_index on test_partitioned using hash(a);
-- these should all fail
select pgstattuple('test_partitioned');
select pgstattuple('test_partitioned_index');
@@ -73,6 +74,7 @@ select pg_relpages('test_partitioned');
select pgstatindex('test_partitioned');
select pgstatginindex('test_partitioned');
select pgstathashindex('test_partitioned');
+select pgstathashindex('test_partitioned_hash_index');
create view test_view as select 1;
-- these should all fail
diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out
index 93ae45d..d21a3d8 100644
--- a/contrib/postgres_fdw/expected/postgres_fdw.out
+++ b/contrib/postgres_fdw/expected/postgres_fdw.out
@@ -10576,6 +10576,13 @@ SELECT * FROM result_tbl ORDER BY a;
(2 rows)
DELETE FROM result_tbl;
+-- Test error handling when accessing one of the foreign partitions errors out
+CREATE FOREIGN TABLE async_p_broken PARTITION OF async_pt FOR VALUES FROM (10000) TO (10001)
+ SERVER loopback OPTIONS (table_name 'non_existent_table');
+SELECT * FROM async_pt;
+ERROR: relation "public.non_existent_table" does not exist
+CONTEXT: remote SQL command: SELECT a, b, c FROM public.non_existent_table
+DROP FOREIGN TABLE async_p_broken;
-- Check case where multiple partitions use the same connection
CREATE TABLE base_tbl3 (a int, b int, c text);
CREATE FOREIGN TABLE async_p3 PARTITION OF async_pt FOR VALUES FROM (3000) TO (4000)
diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql
index f409729..80aa7fb 100644
--- a/contrib/postgres_fdw/sql/postgres_fdw.sql
+++ b/contrib/postgres_fdw/sql/postgres_fdw.sql
@@ -3404,6 +3404,12 @@ INSERT INTO result_tbl SELECT a, b, 'AAA' || c FROM async_pt WHERE b === 505;
SELECT * FROM result_tbl ORDER BY a;
DELETE FROM result_tbl;
+-- Test error handling when accessing one of the foreign partitions errors out
+CREATE FOREIGN TABLE async_p_broken PARTITION OF async_pt FOR VALUES FROM (10000) TO (10001)
+ SERVER loopback OPTIONS (table_name 'non_existent_table');
+SELECT * FROM async_pt;
+DROP FOREIGN TABLE async_p_broken;
+
-- Check case where multiple partitions use the same connection
CREATE TABLE base_tbl3 (a int, b int, c text);
CREATE FOREIGN TABLE async_p3 PARTITION OF async_pt FOR VALUES FROM (3000) TO (4000)
diff --git a/contrib/test_decoding/expected/catalog_change_snapshot.out b/contrib/test_decoding/expected/catalog_change_snapshot.out
index b33e49c..551dc22 100644
--- a/contrib/test_decoding/expected/catalog_change_snapshot.out
+++ b/contrib/test_decoding/expected/catalog_change_snapshot.out
@@ -132,3 +132,47 @@ COMMIT
stop
(1 row)
+
+starting permutation: s0_init s0_begin s0_savepoint s0_create_part1 s0_savepoint_release s1_checkpoint s0_create_part2 s0_commit s0_begin s0_truncate s1_checkpoint s1_get_changes s0_insert_part s1_get_changes s0_commit s1_get_changes
+step s0_init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');
+?column?
+--------
+init
+(1 row)
+
+step s0_begin: BEGIN;
+step s0_savepoint: SAVEPOINT sp1;
+step s0_create_part1: CREATE TABLE tbl1_part_p1 PARTITION OF tbl1_part FOR VALUES FROM (0) TO (10);
+step s0_savepoint_release: RELEASE SAVEPOINT sp1;
+step s1_checkpoint: CHECKPOINT;
+step s0_create_part2: CREATE TABLE tbl1_part_p2 PARTITION OF tbl1_part FOR VALUES FROM (10) TO (20);
+step s0_commit: COMMIT;
+step s0_begin: BEGIN;
+step s0_truncate: TRUNCATE tbl1;
+step s1_checkpoint: CHECKPOINT;
+step s1_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0');
+data
+----
+(0 rows)
+
+step s0_insert_part: INSERT INTO tbl1_part VALUES (1);
+step s1_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0');
+data
+----
+(0 rows)
+
+step s0_commit: COMMIT;
+step s1_get_changes: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'skip-empty-xacts', '1', 'include-xids', '0');
+data
+--------------------------------------------------
+BEGIN
+table public.tbl1: TRUNCATE: (no-flags)
+table public.tbl1_part_p1: INSERT: val1[integer]:1
+COMMIT
+(4 rows)
+
+?column?
+--------
+stop
+(1 row)
+
diff --git a/contrib/test_decoding/specs/catalog_change_snapshot.spec b/contrib/test_decoding/specs/catalog_change_snapshot.spec
index 770dbd6..d8b9df9 100644
--- a/contrib/test_decoding/specs/catalog_change_snapshot.spec
+++ b/contrib/test_decoding/specs/catalog_change_snapshot.spec
@@ -3,13 +3,16 @@
setup
{
DROP TABLE IF EXISTS tbl1;
+ DROP TABLE IF EXISTS tbl1_part;
CREATE TABLE tbl1 (val1 integer, val2 integer);
+ CREATE TABLE tbl1_part (val1 integer) PARTITION BY RANGE (val1);
CREATE TABLE user_cat (val1 integer) WITH (user_catalog_table = true);
}
teardown
{
DROP TABLE tbl1;
+ DROP TABLE tbl1_part;
DROP TABLE user_cat;
SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot');
}
@@ -19,9 +22,13 @@ setup { SET synchronous_commit=on; }
step "s0_init" { SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); }
step "s0_begin" { BEGIN; }
step "s0_savepoint" { SAVEPOINT sp1; }
+step "s0_savepoint_release" { RELEASE SAVEPOINT sp1; }
step "s0_truncate" { TRUNCATE tbl1; }
step "s0_insert" { INSERT INTO tbl1 VALUES (1); }
step "s0_insert2" { INSERT INTO user_cat VALUES (1); }
+step "s0_insert_part" { INSERT INTO tbl1_part VALUES (1); }
+step "s0_create_part1" { CREATE TABLE tbl1_part_p1 PARTITION OF tbl1_part FOR VALUES FROM (0) TO (10); }
+step "s0_create_part2" { CREATE TABLE tbl1_part_p2 PARTITION OF tbl1_part FOR VALUES FROM (10) TO (20); }
step "s0_commit" { COMMIT; }
session "s1"
@@ -60,3 +67,11 @@ permutation "s0_init" "s0_begin" "s0_savepoint" "s0_insert" "s1_checkpoint" "s1_
# to skip this xact but ensure that corresponding invalidation messages
# get processed.
permutation "s0_init" "s0_begin" "s0_savepoint" "s0_insert" "s1_checkpoint" "s1_get_changes" "s0_truncate" "s0_commit" "s0_begin" "s0_insert" "s1_checkpoint" "s1_get_changes" "s0_commit" "s1_get_changes"
+
+# The last decoding restarts from the first checkpoint and doesn't decode
+# any WAL records generated by the subtransaction that performed s0_create_part1.
+# While processing the commit record for the corresponding top-level transaction
+# which will be marked as containing a catalog change even before commit, we ensure
+# that the corresponding subtransaction is also marked as containing a catalog
+# modifying change.
+permutation "s0_init" "s0_begin" "s0_savepoint" "s0_create_part1" "s0_savepoint_release" "s1_checkpoint" "s0_create_part2" "s0_commit" "s0_begin" "s0_truncate" "s1_checkpoint" "s1_get_changes" "s0_insert_part" "s1_get_changes" "s0_commit" "s1_get_changes"
diff --git a/contrib/xml2/xpath.c b/contrib/xml2/xpath.c
index a692dc6..9464193 100644
--- a/contrib/xml2/xpath.c
+++ b/contrib/xml2/xpath.c
@@ -75,7 +75,6 @@ pgxml_parser_init(PgXmlStrictness strictness)
xmlInitParser();
xmlSubstituteEntitiesDefault(1);
- xmlLoadExtDtdDefaultValue = 1;
return xmlerrcxt;
}