Diffstat (limited to 'src/test/isolation/specs')
-rw-r--r--  src/test/isolation/specs/aborted-keyrevoke.spec | 46
-rw-r--r--  src/test/isolation/specs/alter-table-1.spec | 170
-rw-r--r--  src/test/isolation/specs/alter-table-2.spec | 79
-rw-r--r--  src/test/isolation/specs/alter-table-3.spec | 79
-rw-r--r--  src/test/isolation/specs/alter-table-4.spec | 37
-rw-r--r--  src/test/isolation/specs/async-notify.spec | 84
-rw-r--r--  src/test/isolation/specs/classroom-scheduling.spec | 29
-rw-r--r--  src/test/isolation/specs/cluster-conflict-partition.spec | 37
-rw-r--r--  src/test/isolation/specs/cluster-conflict.spec | 30
-rw-r--r--  src/test/isolation/specs/create-trigger.spec | 54
-rw-r--r--  src/test/isolation/specs/deadlock-hard.spec | 79
-rw-r--r--  src/test/isolation/specs/deadlock-parallel.spec | 113
-rw-r--r--  src/test/isolation/specs/deadlock-simple.spec | 29
-rw-r--r--  src/test/isolation/specs/deadlock-soft-2.spec | 43
-rw-r--r--  src/test/isolation/specs/deadlock-soft.spec | 40
-rw-r--r--  src/test/isolation/specs/delete-abort-savept-2.spec | 34
-rw-r--r--  src/test/isolation/specs/delete-abort-savept.spec | 37
-rw-r--r--  src/test/isolation/specs/detach-partition-concurrently-1.spec | 69
-rw-r--r--  src/test/isolation/specs/detach-partition-concurrently-2.spec | 41
-rw-r--r--  src/test/isolation/specs/detach-partition-concurrently-3.spec | 86
-rw-r--r--  src/test/isolation/specs/detach-partition-concurrently-4.spec | 83
-rw-r--r--  src/test/isolation/specs/drop-index-concurrently-1.spec | 43
-rw-r--r--  src/test/isolation/specs/eval-plan-qual-trigger.spec | 410
-rw-r--r--  src/test/isolation/specs/eval-plan-qual.spec | 378
-rw-r--r--  src/test/isolation/specs/fk-contention.spec | 19
-rw-r--r--  src/test/isolation/specs/fk-deadlock.spec | 46
-rw-r--r--  src/test/isolation/specs/fk-deadlock2.spec | 48
-rw-r--r--  src/test/isolation/specs/fk-partitioned-1.spec | 45
-rw-r--r--  src/test/isolation/specs/fk-partitioned-2.spec | 29
-rw-r--r--  src/test/isolation/specs/fk-snapshot.spec | 76
-rw-r--r--  src/test/isolation/specs/freeze-the-dead.spec | 56
-rw-r--r--  src/test/isolation/specs/horizons.spec | 169
-rw-r--r--  src/test/isolation/specs/index-only-scan.spec | 46
-rw-r--r--  src/test/isolation/specs/inherit-temp.spec | 78
-rw-r--r--  src/test/isolation/specs/insert-conflict-do-nothing-2.spec | 34
-rw-r--r--  src/test/isolation/specs/insert-conflict-do-nothing.spec | 40
-rw-r--r--  src/test/isolation/specs/insert-conflict-do-update-2.spec | 40
-rw-r--r--  src/test/isolation/specs/insert-conflict-do-update-3.spec | 69
-rw-r--r--  src/test/isolation/specs/insert-conflict-do-update.spec | 39
-rw-r--r--  src/test/isolation/specs/insert-conflict-specconflict.spec | 259
-rw-r--r--  src/test/isolation/specs/lock-committed-keyupdate.spec | 66
-rw-r--r--  src/test/isolation/specs/lock-committed-update.spec | 62
-rw-r--r--  src/test/isolation/specs/lock-update-delete.spec | 61
-rw-r--r--  src/test/isolation/specs/lock-update-traversal.spec | 39
-rw-r--r--  src/test/isolation/specs/matview-write-skew.spec | 51
-rw-r--r--  src/test/isolation/specs/merge-delete.spec | 96
-rw-r--r--  src/test/isolation/specs/merge-insert-update.spec | 51
-rw-r--r--  src/test/isolation/specs/merge-join.spec | 45
-rw-r--r--  src/test/isolation/specs/merge-match-recheck.spec | 184
-rw-r--r--  src/test/isolation/specs/merge-update.spec | 156
-rw-r--r--  src/test/isolation/specs/multiple-cic.spec | 43
-rw-r--r--  src/test/isolation/specs/multiple-row-versions.spec | 47
-rw-r--r--  src/test/isolation/specs/multixact-no-deadlock.spec | 35
-rw-r--r--  src/test/isolation/specs/multixact-no-forget.spec | 44
-rw-r--r--  src/test/isolation/specs/nowait-2.spec | 37
-rw-r--r--  src/test/isolation/specs/nowait-3.spec | 33
-rw-r--r--  src/test/isolation/specs/nowait-4.spec | 35
-rw-r--r--  src/test/isolation/specs/nowait-5.spec | 57
-rw-r--r--  src/test/isolation/specs/nowait.spec | 25
-rw-r--r--  src/test/isolation/specs/partial-index.spec | 32
-rw-r--r--  src/test/isolation/specs/partition-concurrent-attach.spec | 43
-rw-r--r--  src/test/isolation/specs/partition-drop-index-locking.spec | 47
-rw-r--r--  src/test/isolation/specs/partition-key-update-1.spec | 86
-rw-r--r--  src/test/isolation/specs/partition-key-update-2.spec | 45
-rw-r--r--  src/test/isolation/specs/partition-key-update-3.spec | 44
-rw-r--r--  src/test/isolation/specs/partition-key-update-4.spec | 76
-rw-r--r--  src/test/isolation/specs/plpgsql-toast.spec | 178
-rw-r--r--  src/test/isolation/specs/predicate-gin.spec | 115
-rw-r--r--  src/test/isolation/specs/predicate-gist.spec | 117
-rw-r--r--  src/test/isolation/specs/predicate-hash.spec | 122
-rw-r--r--  src/test/isolation/specs/predicate-lock-hot-tuple.spec | 37
-rw-r--r--  src/test/isolation/specs/prepared-transactions-cic.spec | 37
-rw-r--r--  src/test/isolation/specs/prepared-transactions.spec | 1507
-rw-r--r--  src/test/isolation/specs/project-manager.spec | 30
-rw-r--r--  src/test/isolation/specs/propagate-lock-delete.spec | 42
-rw-r--r--  src/test/isolation/specs/read-only-anomaly-2.spec | 42
-rw-r--r--  src/test/isolation/specs/read-only-anomaly-3.spec | 39
-rw-r--r--  src/test/isolation/specs/read-only-anomaly.spec | 38
-rw-r--r--  src/test/isolation/specs/read-write-unique-2.spec | 36
-rw-r--r--  src/test/isolation/specs/read-write-unique-3.spec | 33
-rw-r--r--  src/test/isolation/specs/read-write-unique-4.spec | 48
-rw-r--r--  src/test/isolation/specs/read-write-unique.spec | 39
-rw-r--r--  src/test/isolation/specs/receipt-report.spec | 47
-rw-r--r--  src/test/isolation/specs/referential-integrity.spec | 32
-rw-r--r--  src/test/isolation/specs/reindex-concurrently-toast.spec | 119
-rw-r--r--  src/test/isolation/specs/reindex-concurrently.spec | 40
-rw-r--r--  src/test/isolation/specs/reindex-schema.spec | 32
-rw-r--r--  src/test/isolation/specs/ri-trigger.spec | 53
-rw-r--r--  src/test/isolation/specs/sequence-ddl.spec | 41
-rw-r--r--  src/test/isolation/specs/serializable-parallel-2.spec | 34
-rw-r--r--  src/test/isolation/specs/serializable-parallel-3.spec | 47
-rw-r--r--  src/test/isolation/specs/serializable-parallel.spec | 47
-rw-r--r--  src/test/isolation/specs/simple-write-skew.spec | 30
-rw-r--r--  src/test/isolation/specs/skip-locked-2.spec | 41
-rw-r--r--  src/test/isolation/specs/skip-locked-3.spec | 36
-rw-r--r--  src/test/isolation/specs/skip-locked-4.spec | 36
-rw-r--r--  src/test/isolation/specs/skip-locked.spec | 28
-rw-r--r--  src/test/isolation/specs/stats.spec | 760
-rw-r--r--  src/test/isolation/specs/subxid-overflow.spec | 79
-rw-r--r--  src/test/isolation/specs/temp-schema-cleanup.spec | 85
-rw-r--r--  src/test/isolation/specs/temporal-range-integrity.spec | 38
-rw-r--r--  src/test/isolation/specs/timeouts.spec | 49
-rw-r--r--  src/test/isolation/specs/total-cash.spec | 28
-rw-r--r--  src/test/isolation/specs/truncate-conflict.spec | 38
-rw-r--r--  src/test/isolation/specs/tuplelock-conflict.spec | 63
-rw-r--r--  src/test/isolation/specs/tuplelock-partition.spec | 32
-rw-r--r--  src/test/isolation/specs/tuplelock-update.spec | 37
-rw-r--r--  src/test/isolation/specs/tuplelock-upgrade-no-deadlock.spec | 69
-rw-r--r--  src/test/isolation/specs/two-ids.spec | 40
-rw-r--r--  src/test/isolation/specs/update-conflict-out.spec | 54
-rw-r--r--  src/test/isolation/specs/update-locked-tuple.spec | 38
-rw-r--r--  src/test/isolation/specs/vacuum-concurrent-drop.spec | 45
-rw-r--r--  src/test/isolation/specs/vacuum-conflict.spec | 51
-rw-r--r--  src/test/isolation/specs/vacuum-no-cleanup-lock.spec | 150
-rw-r--r--  src/test/isolation/specs/vacuum-skip-locked.spec | 61
115 files changed, 9603 insertions, 0 deletions
diff --git a/src/test/isolation/specs/aborted-keyrevoke.spec b/src/test/isolation/specs/aborted-keyrevoke.spec
new file mode 100644
index 0000000..4f6f902
--- /dev/null
+++ b/src/test/isolation/specs/aborted-keyrevoke.spec
@@ -0,0 +1,46 @@
+# When a tuple that has been updated is locked, the locking command
+# should traverse the update chain; thus, a DELETE should not be able
+# to proceed until the lock has been released.
+
+setup
+{
+ CREATE TABLE foo (
+ key int PRIMARY KEY,
+ value int
+ );
+
+ INSERT INTO foo VALUES (1, 1);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1s { SAVEPOINT f; }
+step s1u { UPDATE foo SET key = 2; } # obtain KEY REVOKE
+step s1r { ROLLBACK TO f; } # lose KEY REVOKE
+step s1l { SELECT * FROM foo FOR KEY SHARE; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2l { SELECT * FROM foo FOR KEY SHARE; }
+step s2c { COMMIT; }
+
+permutation s1s s1u s1r s1l s1c s2l s2c
+permutation s1s s1u s1r s1l s2l s1c s2c
+permutation s1s s1u s1r s1l s2l s2c s1c
+permutation s1s s1u s1r s2l s1l s1c s2c
+permutation s1s s1u s1r s2l s1l s2c s1c
+permutation s1s s1u s1r s2l s2c s1l s1c
+permutation s1s s1u s2l s1r s1l s1c s2c
+permutation s1s s1u s2l s1r s1l s2c s1c
+permutation s1s s1u s2l s1r s2c s1l s1c
+permutation s1s s2l s1u s2c s1r s1l s1c
+permutation s1s s2l s2c s1u s1r s1l s1c
+permutation s2l s1s s1u s2c s1r s1l s1c
+permutation s2l s1s s2c s1u s1r s1l s1c
+permutation s2l s2c s1s s1u s1r s1l s1c
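
[Editor's note, not part of the patch: a minimal sketch of what FOR KEY SHARE does and does not block, using the foo table from the setup above. The session markers are comments; run the statements in two separate psql sessions.

    -- session A
    BEGIN;
    SELECT * FROM foo FOR KEY SHARE;          -- locks the row's key
    -- session B
    UPDATE foo SET value = 2 WHERE key = 1;   -- non-key update: does not block
    UPDATE foo SET key = 2 WHERE key = 1;     -- key-changing update: waits for A
    -- session A
    COMMIT;                                   -- session B's second update proceeds

The permutations above exercise the same conflict after the key-changing update has been rolled back inside a subtransaction.]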
diff --git a/src/test/isolation/specs/alter-table-1.spec b/src/test/isolation/specs/alter-table-1.spec
new file mode 100644
index 0000000..dfd0ce7
--- /dev/null
+++ b/src/test/isolation/specs/alter-table-1.spec
@@ -0,0 +1,170 @@
+# ALTER TABLE - Add and Validate constraint with concurrent writes
+#
+# VALIDATE allows a minimum of ShareUpdateExclusiveLock
+# so we mix reads with it to see what works or waits
+
+setup
+{
+ CREATE TABLE a (i int PRIMARY KEY);
+ CREATE TABLE b (a_id int);
+ INSERT INTO a VALUES (0), (1), (2), (3);
+ INSERT INTO b SELECT generate_series(1,1000) % 4;
+}
+
+teardown
+{
+ DROP TABLE a, b;
+}
+
+session s1
+step s1 { BEGIN; }
+step at1 { ALTER TABLE b ADD CONSTRAINT bfk FOREIGN KEY (a_id) REFERENCES a (i) NOT VALID; }
+step sc1 { COMMIT; }
+step s2 { BEGIN; }
+step at2 { ALTER TABLE b VALIDATE CONSTRAINT bfk; }
+step sc2 { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step rx1 { SELECT * FROM b WHERE a_id = 1 LIMIT 1; }
+step wx { INSERT INTO b VALUES (0); }
+step rx3 { SELECT * FROM b WHERE a_id = 3 LIMIT 3; }
+step c2 { COMMIT; }
+
+permutation s1 at1 sc1 s2 at2 sc2 rx1 wx rx3 c2
+permutation s1 at1 sc1 s2 at2 rx1 sc2 wx rx3 c2
+permutation s1 at1 sc1 s2 at2 rx1 wx sc2 rx3 c2
+permutation s1 at1 sc1 s2 at2 rx1 wx rx3 sc2 c2
+permutation s1 at1 sc1 s2 at2 rx1 wx rx3 c2 sc2
+permutation s1 at1 sc1 s2 rx1 at2 sc2 wx rx3 c2
+permutation s1 at1 sc1 s2 rx1 at2 wx sc2 rx3 c2
+permutation s1 at1 sc1 s2 rx1 at2 wx rx3 sc2 c2
+permutation s1 at1 sc1 s2 rx1 at2 wx rx3 c2 sc2
+permutation s1 at1 sc1 s2 rx1 wx at2 sc2 rx3 c2
+permutation s1 at1 sc1 s2 rx1 wx at2 rx3 sc2 c2
+permutation s1 at1 sc1 s2 rx1 wx at2 rx3 c2 sc2
+permutation s1 at1 sc1 s2 rx1 wx rx3 at2 sc2 c2
+permutation s1 at1 sc1 s2 rx1 wx rx3 at2 c2 sc2
+permutation s1 at1 sc1 s2 rx1 wx rx3 c2 at2 sc2
+permutation s1 at1 sc1 rx1 s2 at2 sc2 wx rx3 c2
+permutation s1 at1 sc1 rx1 s2 at2 wx sc2 rx3 c2
+permutation s1 at1 sc1 rx1 s2 at2 wx rx3 sc2 c2
+permutation s1 at1 sc1 rx1 s2 at2 wx rx3 c2 sc2
+permutation s1 at1 sc1 rx1 s2 wx at2 sc2 rx3 c2
+permutation s1 at1 sc1 rx1 s2 wx at2 rx3 sc2 c2
+permutation s1 at1 sc1 rx1 s2 wx at2 rx3 c2 sc2
+permutation s1 at1 sc1 rx1 s2 wx rx3 at2 sc2 c2
+permutation s1 at1 sc1 rx1 s2 wx rx3 at2 c2 sc2
+permutation s1 at1 sc1 rx1 s2 wx rx3 c2 at2 sc2
+permutation s1 at1 sc1 rx1 wx s2 at2 sc2 rx3 c2
+permutation s1 at1 sc1 rx1 wx s2 at2 rx3 sc2 c2
+permutation s1 at1 sc1 rx1 wx s2 at2 rx3 c2 sc2
+permutation s1 at1 sc1 rx1 wx s2 rx3 at2 sc2 c2
+permutation s1 at1 sc1 rx1 wx s2 rx3 at2 c2 sc2
+permutation s1 at1 sc1 rx1 wx s2 rx3 c2 at2 sc2
+permutation s1 at1 sc1 rx1 wx rx3 s2 at2 sc2 c2
+permutation s1 at1 sc1 rx1 wx rx3 s2 at2 c2 sc2
+permutation s1 at1 sc1 rx1 wx rx3 s2 c2 at2 sc2
+permutation s1 at1 sc1 rx1 wx rx3 c2 s2 at2 sc2
+permutation s1 at1 rx1 sc1 s2 at2 sc2 wx rx3 c2
+permutation s1 at1 rx1 sc1 s2 at2 wx sc2 rx3 c2
+permutation s1 at1 rx1 sc1 s2 at2 wx rx3 sc2 c2
+permutation s1 at1 rx1 sc1 s2 at2 wx rx3 c2 sc2
+permutation s1 at1 rx1 sc1 s2 wx at2 sc2 rx3 c2
+permutation s1 at1 rx1 sc1 s2 wx at2 rx3 sc2 c2
+permutation s1 at1 rx1 sc1 s2 wx at2 rx3 c2 sc2
+permutation s1 at1 rx1 sc1 s2 wx rx3 at2 sc2 c2
+permutation s1 at1 rx1 sc1 s2 wx rx3 at2 c2 sc2
+permutation s1 at1 rx1 sc1 s2 wx rx3 c2 at2 sc2
+permutation s1 at1 rx1 sc1 wx s2 at2 sc2 rx3 c2
+permutation s1 at1 rx1 sc1 wx s2 at2 rx3 sc2 c2
+permutation s1 at1 rx1 sc1 wx s2 at2 rx3 c2 sc2
+permutation s1 at1 rx1 sc1 wx s2 rx3 at2 sc2 c2
+permutation s1 at1 rx1 sc1 wx s2 rx3 at2 c2 sc2
+permutation s1 at1 rx1 sc1 wx s2 rx3 c2 at2 sc2
+permutation s1 at1 rx1 sc1 wx rx3 s2 at2 sc2 c2
+permutation s1 at1 rx1 sc1 wx rx3 s2 at2 c2 sc2
+permutation s1 at1 rx1 sc1 wx rx3 s2 c2 at2 sc2
+permutation s1 at1 rx1 sc1 wx rx3 c2 s2 at2 sc2
+permutation s1 at1 rx1 wx sc1 s2 at2 sc2 rx3 c2
+permutation s1 at1 rx1 wx sc1 s2 at2 rx3 sc2 c2
+permutation s1 at1 rx1 wx sc1 s2 at2 rx3 c2 sc2
+permutation s1 at1 rx1 wx sc1 s2 rx3 at2 sc2 c2
+permutation s1 at1 rx1 wx sc1 s2 rx3 at2 c2 sc2
+permutation s1 at1 rx1 wx sc1 s2 rx3 c2 at2 sc2
+permutation s1 at1 rx1 wx sc1 rx3 s2 at2 sc2 c2
+permutation s1 at1 rx1 wx sc1 rx3 s2 at2 c2 sc2
+permutation s1 at1 rx1 wx sc1 rx3 s2 c2 at2 sc2
+permutation s1 at1 rx1 wx sc1 rx3 c2 s2 at2 sc2
+permutation s1 rx1 at1 sc1 s2 at2 sc2 wx rx3 c2
+permutation s1 rx1 at1 sc1 s2 at2 wx sc2 rx3 c2
+permutation s1 rx1 at1 sc1 s2 at2 wx rx3 sc2 c2
+permutation s1 rx1 at1 sc1 s2 at2 wx rx3 c2 sc2
+permutation s1 rx1 at1 sc1 s2 wx at2 sc2 rx3 c2
+permutation s1 rx1 at1 sc1 s2 wx at2 rx3 sc2 c2
+permutation s1 rx1 at1 sc1 s2 wx at2 rx3 c2 sc2
+permutation s1 rx1 at1 sc1 s2 wx rx3 at2 sc2 c2
+permutation s1 rx1 at1 sc1 s2 wx rx3 at2 c2 sc2
+permutation s1 rx1 at1 sc1 s2 wx rx3 c2 at2 sc2
+permutation s1 rx1 at1 sc1 wx s2 at2 sc2 rx3 c2
+permutation s1 rx1 at1 sc1 wx s2 at2 rx3 sc2 c2
+permutation s1 rx1 at1 sc1 wx s2 at2 rx3 c2 sc2
+permutation s1 rx1 at1 sc1 wx s2 rx3 at2 sc2 c2
+permutation s1 rx1 at1 sc1 wx s2 rx3 at2 c2 sc2
+permutation s1 rx1 at1 sc1 wx s2 rx3 c2 at2 sc2
+permutation s1 rx1 at1 sc1 wx rx3 s2 at2 sc2 c2
+permutation s1 rx1 at1 sc1 wx rx3 s2 at2 c2 sc2
+permutation s1 rx1 at1 sc1 wx rx3 s2 c2 at2 sc2
+permutation s1 rx1 at1 sc1 wx rx3 c2 s2 at2 sc2
+permutation s1 rx1 at1 wx sc1 s2 at2 sc2 rx3 c2
+permutation s1 rx1 at1 wx sc1 s2 at2 rx3 sc2 c2
+permutation s1 rx1 at1 wx sc1 s2 at2 rx3 c2 sc2
+permutation s1 rx1 at1 wx sc1 s2 rx3 at2 sc2 c2
+permutation s1 rx1 at1 wx sc1 s2 rx3 at2 c2 sc2
+permutation s1 rx1 at1 wx sc1 s2 rx3 c2 at2 sc2
+permutation s1 rx1 at1 wx sc1 rx3 s2 at2 sc2 c2
+permutation s1 rx1 at1 wx sc1 rx3 s2 at2 c2 sc2
+permutation s1 rx1 at1 wx sc1 rx3 s2 c2 at2 sc2
+permutation s1 rx1 at1 wx sc1 rx3 c2 s2 at2 sc2
+permutation s1 rx1 wx at1 rx3 c2 sc1 s2 at2 sc2
+permutation s1 rx1 wx rx3 at1 c2 sc1 s2 at2 sc2
+permutation s1 rx1 wx rx3 c2 at1 sc1 s2 at2 sc2
+permutation rx1 s1 at1 sc1 s2 at2 sc2 wx rx3 c2
+permutation rx1 s1 at1 sc1 s2 at2 wx sc2 rx3 c2
+permutation rx1 s1 at1 sc1 s2 at2 wx rx3 sc2 c2
+permutation rx1 s1 at1 sc1 s2 at2 wx rx3 c2 sc2
+permutation rx1 s1 at1 sc1 s2 wx at2 sc2 rx3 c2
+permutation rx1 s1 at1 sc1 s2 wx at2 rx3 sc2 c2
+permutation rx1 s1 at1 sc1 s2 wx at2 rx3 c2 sc2
+permutation rx1 s1 at1 sc1 s2 wx rx3 at2 sc2 c2
+permutation rx1 s1 at1 sc1 s2 wx rx3 at2 c2 sc2
+permutation rx1 s1 at1 sc1 s2 wx rx3 c2 at2 sc2
+permutation rx1 s1 at1 sc1 wx s2 at2 sc2 rx3 c2
+permutation rx1 s1 at1 sc1 wx s2 at2 rx3 sc2 c2
+permutation rx1 s1 at1 sc1 wx s2 at2 rx3 c2 sc2
+permutation rx1 s1 at1 sc1 wx s2 rx3 at2 sc2 c2
+permutation rx1 s1 at1 sc1 wx s2 rx3 at2 c2 sc2
+permutation rx1 s1 at1 sc1 wx s2 rx3 c2 at2 sc2
+permutation rx1 s1 at1 sc1 wx rx3 s2 at2 sc2 c2
+permutation rx1 s1 at1 sc1 wx rx3 s2 at2 c2 sc2
+permutation rx1 s1 at1 sc1 wx rx3 s2 c2 at2 sc2
+permutation rx1 s1 at1 sc1 wx rx3 c2 s2 at2 sc2
+permutation rx1 s1 at1 wx sc1 s2 at2 sc2 rx3 c2
+permutation rx1 s1 at1 wx sc1 s2 at2 rx3 sc2 c2
+permutation rx1 s1 at1 wx sc1 s2 at2 rx3 c2 sc2
+permutation rx1 s1 at1 wx sc1 s2 rx3 at2 sc2 c2
+permutation rx1 s1 at1 wx sc1 s2 rx3 at2 c2 sc2
+permutation rx1 s1 at1 wx sc1 s2 rx3 c2 at2 sc2
+permutation rx1 s1 at1 wx sc1 rx3 s2 at2 sc2 c2
+permutation rx1 s1 at1 wx sc1 rx3 s2 at2 c2 sc2
+permutation rx1 s1 at1 wx sc1 rx3 s2 c2 at2 sc2
+permutation rx1 s1 at1 wx sc1 rx3 c2 s2 at2 sc2
+permutation rx1 s1 wx at1 rx3 c2 sc1 s2 at2 sc2
+permutation rx1 s1 wx rx3 at1 c2 sc1 s2 at2 sc2
+permutation rx1 s1 wx rx3 c2 at1 sc1 s2 at2 sc2
+permutation rx1 wx s1 at1 rx3 c2 sc1 s2 at2 sc2
+permutation rx1 wx s1 rx3 at1 c2 sc1 s2 at2 sc2
+permutation rx1 wx s1 rx3 c2 at1 sc1 s2 at2 sc2
+permutation rx1 wx rx3 s1 at1 c2 sc1 s2 at2 sc2
+permutation rx1 wx rx3 s1 c2 at1 sc1 s2 at2 sc2
+permutation rx1 wx rx3 c2 s1 at1 sc1 s2 at2 sc2
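
[Editor's note, not part of the patch: the lock level the spec's header comment refers to can be observed from a third session while the at2 step is running, assuming the table b from the setup above.

    SELECT l.mode, l.granted
    FROM pg_locks l
    JOIN pg_class c ON c.oid = l.relation
    WHERE c.relname = 'b';
    -- VALIDATE CONSTRAINT shows up as ShareUpdateExclusiveLock, which does not
    -- conflict with the RowExclusiveLock taken by concurrent INSERT/UPDATE/DELETE,
    -- so the wx step can proceed while the validation is still in progress.]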
diff --git a/src/test/isolation/specs/alter-table-2.spec b/src/test/isolation/specs/alter-table-2.spec
new file mode 100644
index 0000000..a3e3131
--- /dev/null
+++ b/src/test/isolation/specs/alter-table-2.spec
@@ -0,0 +1,79 @@
+# ALTER TABLE - Add foreign keys with concurrent reads
+#
+# ADD CONSTRAINT uses ShareRowExclusiveLock so we mix writes with it
+# to see what works or waits.
+
+setup
+{
+ CREATE TABLE a (i int PRIMARY KEY);
+ CREATE TABLE b (a_id int);
+ INSERT INTO a VALUES (0), (1), (2), (3);
+ INSERT INTO b SELECT generate_series(1,1000) % 4;
+}
+
+teardown
+{
+ DROP TABLE a, b;
+}
+
+session s1
+step s1a { BEGIN; }
+step s1b { ALTER TABLE b ADD CONSTRAINT bfk FOREIGN KEY (a_id) REFERENCES a (i) NOT VALID; }
+step s1c { COMMIT; }
+
+session s2
+step s2a { BEGIN; }
+step s2b { SELECT * FROM a WHERE i = 1 LIMIT 1 FOR UPDATE; }
+step s2c { SELECT * FROM b WHERE a_id = 3 LIMIT 1 FOR UPDATE; }
+step s2d { INSERT INTO b VALUES (0); }
+step s2e { INSERT INTO a VALUES (4); }
+step s2f { COMMIT; }
+
+permutation s1a s1b s1c s2a s2b s2c s2d s2e s2f
+permutation s1a s1b s2a s1c s2b s2c s2d s2e s2f
+permutation s1a s1b s2a s2b s1c s2c s2d s2e s2f
+permutation s1a s1b s2a s2b s2c s1c s2d s2e s2f
+permutation s1a s1b s2a s2b s2c s2d s1c s2e s2f
+permutation s1a s2a s1b s1c s2b s2c s2d s2e s2f
+permutation s1a s2a s1b s2b s1c s2c s2d s2e s2f
+permutation s1a s2a s1b s2b s2c s1c s2d s2e s2f
+permutation s1a s2a s1b s2b s2c s2d s1c s2e s2f
+permutation s1a s2a s2b s1b s1c s2c s2d s2e s2f
+permutation s1a s2a s2b s1b s2c s1c s2d s2e s2f
+permutation s1a s2a s2b s1b s2c s2d s1c s2e s2f
+permutation s1a s2a s2b s2c s1b s1c s2d s2e s2f
+permutation s1a s2a s2b s2c s1b s2d s1c s2e s2f
+permutation s1a s2a s2b s2c s2d s1b s2e s2f s1c
+permutation s1a s2a s2b s2c s2d s2e s1b s2f s1c
+permutation s1a s2a s2b s2c s2d s2e s2f s1b s1c
+permutation s2a s1a s1b s1c s2b s2c s2d s2e s2f
+permutation s2a s1a s1b s2b s1c s2c s2d s2e s2f
+permutation s2a s1a s1b s2b s2c s1c s2d s2e s2f
+permutation s2a s1a s1b s2b s2c s2d s1c s2e s2f
+permutation s2a s1a s2b s1b s1c s2c s2d s2e s2f
+permutation s2a s1a s2b s1b s2c s1c s2d s2e s2f
+permutation s2a s1a s2b s1b s2c s2d s1c s2e s2f
+permutation s2a s1a s2b s2c s1b s1c s2d s2e s2f
+permutation s2a s1a s2b s2c s1b s2d s1c s2e s2f
+permutation s2a s1a s2b s2c s2d s1b s2e s2f s1c
+permutation s2a s1a s2b s2c s2d s2e s1b s2f s1c
+permutation s2a s1a s2b s2c s2d s2e s2f s1b s1c
+permutation s2a s2b s1a s1b s1c s2c s2d s2e s2f
+permutation s2a s2b s1a s1b s2c s1c s2d s2e s2f
+permutation s2a s2b s1a s1b s2c s2d s1c s2e s2f
+permutation s2a s2b s1a s2c s1b s1c s2d s2e s2f
+permutation s2a s2b s1a s2c s1b s2d s1c s2e s2f
+permutation s2a s2b s1a s2c s2d s1b s2e s2f s1c
+permutation s2a s2b s1a s2c s2d s2e s1b s2f s1c
+permutation s2a s2b s1a s2c s2d s2e s2f s1b s1c
+permutation s2a s2b s2c s1a s1b s1c s2d s2e s2f
+permutation s2a s2b s2c s1a s1b s2d s1c s2e s2f
+permutation s2a s2b s2c s1a s2d s1b s2e s2f s1c
+permutation s2a s2b s2c s1a s2d s2e s1b s2f s1c
+permutation s2a s2b s2c s1a s2d s2e s2f s1b s1c
+permutation s2a s2b s2c s2d s1a s1b s2e s2f s1c
+permutation s2a s2b s2c s2d s1a s2e s1b s2f s1c
+permutation s2a s2b s2c s2d s1a s2e s2f s1b s1c
+permutation s2a s2b s2c s2d s2e s1a s1b s2f s1c
+permutation s2a s2b s2c s2d s2e s1a s2f s1b s1c
+permutation s2a s2b s2c s2d s2e s2f s1a s1b s1c
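
[Editor's note, not part of the patch: a minimal sketch of the conflict these permutations probe, using the tables from the setup above.

    -- session A
    BEGIN;
    INSERT INTO b VALUES (0);   -- takes RowExclusiveLock on b
    -- session B
    ALTER TABLE b ADD CONSTRAINT bfk
      FOREIGN KEY (a_id) REFERENCES a (i) NOT VALID;
    -- requests ShareRowExclusiveLock, which conflicts with RowExclusiveLock,
    -- so it waits here until session A commits or rolls back
    -- session A
    COMMIT;]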
diff --git a/src/test/isolation/specs/alter-table-3.spec b/src/test/isolation/specs/alter-table-3.spec
new file mode 100644
index 0000000..f70d9c0
--- /dev/null
+++ b/src/test/isolation/specs/alter-table-3.spec
@@ -0,0 +1,79 @@
+# ALTER TABLE - Enable and disable triggers with concurrent reads
+#
+# ENABLE/DISABLE TRIGGER uses ShareRowExclusiveLock so we mix writes with
+# it to see what works or waits.
+
+setup
+{
+ CREATE TABLE a (i int PRIMARY KEY);
+ INSERT INTO a VALUES (0), (1), (2), (3);
+ CREATE FUNCTION f() RETURNS TRIGGER LANGUAGE plpgsql AS 'BEGIN RETURN NULL; END;';
+ CREATE TRIGGER t AFTER UPDATE ON a EXECUTE PROCEDURE f();
+}
+
+teardown
+{
+ DROP TABLE a;
+ DROP FUNCTION f();
+}
+
+session s1
+step s1a { BEGIN; }
+step s1b { ALTER TABLE a DISABLE TRIGGER t; }
+step s1c { ALTER TABLE a ENABLE TRIGGER t; }
+step s1d { COMMIT; }
+
+session s2
+step s2a { BEGIN; }
+step s2b { SELECT * FROM a WHERE i = 1 LIMIT 1 FOR UPDATE; }
+step s2c { INSERT INTO a VALUES (0); }
+step s2d { COMMIT; }
+
+permutation s1a s1b s1c s1d s2a s2b s2c s2d
+permutation s1a s1b s1c s2a s1d s2b s2c s2d
+permutation s1a s1b s1c s2a s2b s1d s2c s2d
+permutation s1a s1b s1c s2a s2b s2c s1d s2d
+permutation s1a s1b s2a s1c s1d s2b s2c s2d
+permutation s1a s1b s2a s1c s2b s1d s2c s2d
+permutation s1a s1b s2a s1c s2b s2c s1d s2d
+permutation s1a s1b s2a s2b s1c s1d s2c s2d
+permutation s1a s1b s2a s2b s1c s2c s1d s2d
+permutation s1a s1b s2a s2b s2c s1c s1d s2d
+permutation s1a s2a s1b s1c s1d s2b s2c s2d
+permutation s1a s2a s1b s1c s2b s1d s2c s2d
+permutation s1a s2a s1b s1c s2b s2c s1d s2d
+permutation s1a s2a s1b s2b s1c s1d s2c s2d
+permutation s1a s2a s1b s2b s1c s2c s1d s2d
+permutation s1a s2a s1b s2b s2c s1c s1d s2d
+permutation s1a s2a s2b s1b s1c s1d s2c s2d
+permutation s1a s2a s2b s1b s1c s2c s1d s2d
+permutation s1a s2a s2b s1b s2c s1c s1d s2d
+permutation s1a s2a s2b s2c s1b s1c s1d s2d
+permutation s1a s2a s2b s2c s1b s1c s2d s1d
+permutation s1a s2a s2b s2c s1b s2d s1c s1d
+permutation s1a s2a s2b s2c s2d s1b s1c s1d
+permutation s2a s1a s1b s1c s1d s2b s2c s2d
+permutation s2a s1a s1b s1c s2b s1d s2c s2d
+permutation s2a s1a s1b s1c s2b s2c s1d s2d
+permutation s2a s1a s1b s2b s1c s1d s2c s2d
+permutation s2a s1a s1b s2b s1c s2c s1d s2d
+permutation s2a s1a s1b s2b s2c s1c s1d s2d
+permutation s2a s1a s2b s1b s1c s1d s2c s2d
+permutation s2a s1a s2b s1b s1c s2c s1d s2d
+permutation s2a s1a s2b s1b s2c s1c s1d s2d
+permutation s2a s1a s2b s2c s1b s1c s1d s2d
+permutation s2a s1a s2b s2c s1b s1c s2d s1d
+permutation s2a s1a s2b s2c s1b s2d s1c s1d
+permutation s2a s1a s2b s2c s2d s1b s1c s1d
+permutation s2a s2b s1a s1b s1c s1d s2c s2d
+permutation s2a s2b s1a s1b s1c s2c s1d s2d
+permutation s2a s2b s1a s1b s2c s1c s1d s2d
+permutation s2a s2b s1a s2c s1b s1c s1d s2d
+permutation s2a s2b s1a s2c s1b s1c s2d s1d
+permutation s2a s2b s1a s2c s1b s2d s1c s1d
+permutation s2a s2b s1a s2c s2d s1b s1c s1d
+permutation s2a s2b s2c s1a s1b s1c s1d s2d
+permutation s2a s2b s2c s1a s1b s1c s2d s1d
+permutation s2a s2b s2c s1a s1b s2d s1c s1d
+permutation s2a s2b s2c s1a s2d s1b s1c s1d
+permutation s2a s2b s2c s2d s1a s1b s1c s1d
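
[Editor's note, not part of the patch: the effect of the ENABLE/DISABLE steps can be checked in the catalog, assuming the table and trigger from the setup above.

    ALTER TABLE a DISABLE TRIGGER t;
    SELECT tgname, tgenabled
    FROM pg_trigger WHERE tgrelid = 'a'::regclass;
    -- tgenabled is 'D' while the trigger is disabled and 'O' (fire on origin)
    -- again after ALTER TABLE a ENABLE TRIGGER t;]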
diff --git a/src/test/isolation/specs/alter-table-4.spec b/src/test/isolation/specs/alter-table-4.spec
new file mode 100644
index 0000000..f143b79
--- /dev/null
+++ b/src/test/isolation/specs/alter-table-4.spec
@@ -0,0 +1,37 @@
+# ALTER TABLE - Add and remove inheritance with concurrent reads
+
+setup
+{
+ CREATE TABLE p (a integer);
+ INSERT INTO p VALUES(1);
+ CREATE TABLE c1 () INHERITS (p);
+ INSERT INTO c1 VALUES(10);
+ CREATE TABLE c2 (a integer);
+ INSERT INTO c2 VALUES(100);
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS c1, c2, p;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1delc1 { ALTER TABLE c1 NO INHERIT p; }
+step s1modc1a { ALTER TABLE c1 ALTER COLUMN a TYPE float; }
+step s1addc2 { ALTER TABLE c2 INHERIT p; }
+step s1dropc1 { DROP TABLE c1; }
+step s1c { COMMIT; }
+
+session s2
+step s2sel { SELECT SUM(a) FROM p; }
+
+# NO INHERIT will not be visible to concurrent select,
+# since we identify children before locking them
+permutation s1b s1delc1 s2sel s1c s2sel
+# adding inheritance likewise is not seen if s1 commits after s2 locks p
+permutation s1b s1delc1 s1addc2 s2sel s1c s2sel
+# but we do cope with DROP on a child table
+permutation s1b s1dropc1 s2sel s1c s2sel
+# this case currently results in an error; doesn't seem worth preventing
+permutation s1b s1delc1 s1modc1a s2sel s1c s2sel
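
[Editor's note, not part of the patch: a short illustration of how the inheritance change is reflected, using the tables from the setup above.

    -- list the current children of p
    SELECT inhrelid::regclass AS child
    FROM pg_inherits WHERE inhparent = 'p'::regclass;

    SELECT sum(a) FROM p;        -- scans p plus all attached children
    SELECT sum(a) FROM ONLY p;   -- scans the parent alone

The s2sel step reads the whole hierarchy, so what it sees depends on whether s1's change committed before the children were identified and locked.]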
diff --git a/src/test/isolation/specs/async-notify.spec b/src/test/isolation/specs/async-notify.spec
new file mode 100644
index 0000000..0b8cfd9
--- /dev/null
+++ b/src/test/isolation/specs/async-notify.spec
@@ -0,0 +1,84 @@
+# Tests for LISTEN/NOTIFY
+
+# Most of these tests use only the "notifier" session and hence exercise only
+# self-notifies, which are convenient because they minimize timing concerns.
+# Note we assume that each step is delivered to the backend as a single Query
+# message so it will run as one transaction.
+
+session notifier
+step listenc { LISTEN c1; LISTEN c2; }
+step notify1 { NOTIFY c1; }
+step notify2 { NOTIFY c2, 'payload'; }
+step notify3 { NOTIFY c3, 'payload3'; } # not listening to c3
+step notifyf { SELECT pg_notify('c2', NULL); }
+step notifyd1 { NOTIFY c2, 'payload'; NOTIFY c1; NOTIFY "c2", 'payload'; }
+step notifyd2 { NOTIFY c1; NOTIFY c1; NOTIFY c1, 'p1'; NOTIFY c1, 'p2'; }
+step notifys1 {
+ BEGIN;
+ NOTIFY c1, 'payload'; NOTIFY "c2", 'payload';
+ NOTIFY c1, 'payload'; NOTIFY "c2", 'payload';
+ SAVEPOINT s1;
+ NOTIFY c1, 'payload'; NOTIFY "c2", 'payload';
+ NOTIFY c1, 'payloads'; NOTIFY "c2", 'payloads';
+ NOTIFY c1, 'payload'; NOTIFY "c2", 'payload';
+ NOTIFY c1, 'payloads'; NOTIFY "c2", 'payloads';
+ RELEASE SAVEPOINT s1;
+ SAVEPOINT s2;
+ NOTIFY c1, 'rpayload'; NOTIFY "c2", 'rpayload';
+ NOTIFY c1, 'rpayloads'; NOTIFY "c2", 'rpayloads';
+ NOTIFY c1, 'rpayload'; NOTIFY "c2", 'rpayload';
+ NOTIFY c1, 'rpayloads'; NOTIFY "c2", 'rpayloads';
+ ROLLBACK TO SAVEPOINT s2;
+ COMMIT;
+}
+step usage { SELECT pg_notification_queue_usage() > 0 AS nonzero; }
+step bignotify { SELECT count(pg_notify('c1', s::text)) FROM generate_series(1, 1000) s; }
+teardown { UNLISTEN *; }
+
+# The listener session is used for cross-backend notify checks.
+
+session listener
+step llisten { LISTEN c1; LISTEN c2; }
+step lcheck { SELECT 1 AS x; }
+step lbegin { BEGIN; }
+step lbegins { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step lcommit { COMMIT; }
+teardown { UNLISTEN *; }
+
+# In some tests we need a second listener, just to block the queue.
+
+session listener2
+step l2listen { LISTEN c1; }
+step l2begin { BEGIN; }
+step l2commit { COMMIT; }
+step l2stop { UNLISTEN *; }
+
+
+# Trivial cases.
+permutation listenc notify1 notify2 notify3 notifyf
+
+# Check simple and less-simple deduplication.
+permutation listenc notifyd1 notifyd2 notifys1
+
+# Cross-backend notification delivery. We use a "select 1" to force the
+# listener session to check for notifies. In principle we could just wait
+# for delivery, but that would require extra support in isolationtester
+# and might have portability-of-timing issues.
+permutation llisten notify1 notify2 notify3 notifyf lcheck
+
+# Again, with local delivery too.
+permutation listenc llisten notify1 notify2 notify3 notifyf lcheck
+
+# Check for bug when initial listen is only action in a serializable xact,
+# and notify queue is not empty
+permutation l2listen l2begin notify1 lbegins llisten lcommit l2commit l2stop
+
+# Verify that pg_notification_queue_usage correctly reports a non-zero result,
+# after submitting notifications while another connection is listening for
+# those notifications and waiting inside an active transaction. We have to
+# fill a page of the notify SLRU to make this happen, which is a good deal
+# of traffic. To not bloat the expected output, we intentionally don't
+# commit the listener's transaction, so that it never reports these events.
+# Hence, this should be the last test in this script.
+
+permutation llisten lbegin usage bignotify usage
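
[Editor's note, not part of the patch: the core building blocks these steps rely on, runnable in a single psql session.

    LISTEN c1;
    NOTIFY c1, 'hello';                     -- payload is optional
    SELECT pg_notify('c1', 'hello');        -- function form; channel name can be computed
    SELECT pg_notification_queue_usage();   -- fraction (0..1) of the notify queue in use
    UNLISTEN *;

Notifications are delivered only after the sending transaction commits, which is what lets most of the permutations above run as self-notifies within one session.]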
diff --git a/src/test/isolation/specs/classroom-scheduling.spec b/src/test/isolation/specs/classroom-scheduling.spec
new file mode 100644
index 0000000..770715b
--- /dev/null
+++ b/src/test/isolation/specs/classroom-scheduling.spec
@@ -0,0 +1,29 @@
+# Classroom Scheduling test
+#
+# Ensure that the classroom is not scheduled more than once
+# for any moment in time.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ CREATE TABLE room_reservation (room_id text NOT NULL, start_time timestamp with time zone NOT NULL, end_time timestamp with time zone NOT NULL, description text NOT NULL, CONSTRAINT room_reservation_pkey PRIMARY KEY (room_id, start_time));
+ INSERT INTO room_reservation VALUES ('101', TIMESTAMP WITH TIME ZONE '2010-04-01 10:00', TIMESTAMP WITH TIME ZONE '2010-04-01 11:00', 'Bob');
+}
+
+teardown
+{
+ DROP TABLE room_reservation;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rx1 { SELECT count(*) FROM room_reservation WHERE room_id = '101' AND start_time < TIMESTAMP WITH TIME ZONE '2010-04-01 14:00' AND end_time > TIMESTAMP WITH TIME ZONE '2010-04-01 13:00'; }
+step wy1 { INSERT INTO room_reservation VALUES ('101', TIMESTAMP WITH TIME ZONE '2010-04-01 13:00', TIMESTAMP WITH TIME ZONE '2010-04-01 14:00', 'Carol'); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step ry2 { SELECT count(*) FROM room_reservation WHERE room_id = '101' AND start_time < TIMESTAMP WITH TIME ZONE '2010-04-01 14:30' AND end_time > TIMESTAMP WITH TIME ZONE '2010-04-01 13:30'; }
+step wx2 { UPDATE room_reservation SET start_time = TIMESTAMP WITH TIME ZONE '2010-04-01 13:30', end_time = TIMESTAMP WITH TIME ZONE '2010-04-01 14:30' WHERE room_id = '101' AND start_time = TIMESTAMP WITH TIME ZONE '2010-04-01 10:00'; }
+step c2 { COMMIT; }
diff --git a/src/test/isolation/specs/cluster-conflict-partition.spec b/src/test/isolation/specs/cluster-conflict-partition.spec
new file mode 100644
index 0000000..5091f68
--- /dev/null
+++ b/src/test/isolation/specs/cluster-conflict-partition.spec
@@ -0,0 +1,37 @@
+# Tests for locking conflicts with CLUSTER command and partitions.
+
+setup
+{
+ CREATE ROLE regress_cluster_part;
+ CREATE TABLE cluster_part_tab (a int) PARTITION BY LIST (a);
+ CREATE TABLE cluster_part_tab1 PARTITION OF cluster_part_tab FOR VALUES IN (1);
+ CREATE TABLE cluster_part_tab2 PARTITION OF cluster_part_tab FOR VALUES IN (2);
+ CREATE INDEX cluster_part_ind ON cluster_part_tab(a);
+ ALTER TABLE cluster_part_tab OWNER TO regress_cluster_part;
+}
+
+teardown
+{
+ DROP TABLE cluster_part_tab;
+ DROP ROLE regress_cluster_part;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_lock_parent { LOCK cluster_part_tab IN SHARE UPDATE EXCLUSIVE MODE; }
+step s1_lock_child { LOCK cluster_part_tab1 IN SHARE UPDATE EXCLUSIVE MODE; }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_auth { SET ROLE regress_cluster_part; }
+step s2_cluster { CLUSTER cluster_part_tab USING cluster_part_ind; }
+step s2_reset { RESET ROLE; }
+
+# CLUSTER on the parent waits if locked, passes for all cases.
+permutation s1_begin s1_lock_parent s2_auth s2_cluster s1_commit s2_reset
+permutation s1_begin s2_auth s1_lock_parent s2_cluster s1_commit s2_reset
+
+# When taking a lock on a partition leaf, CLUSTER on the parent skips
+# the leaf, passes for all cases.
+permutation s1_begin s1_lock_child s2_auth s2_cluster s1_commit s2_reset
+permutation s1_begin s2_auth s1_lock_child s2_cluster s1_commit s2_reset
diff --git a/src/test/isolation/specs/cluster-conflict.spec b/src/test/isolation/specs/cluster-conflict.spec
new file mode 100644
index 0000000..2e1d547
--- /dev/null
+++ b/src/test/isolation/specs/cluster-conflict.spec
@@ -0,0 +1,30 @@
+# Tests for locking conflicts with CLUSTER command.
+
+setup
+{
+ CREATE ROLE regress_cluster_conflict;
+ CREATE TABLE cluster_tab (a int);
+ CREATE INDEX cluster_ind ON cluster_tab(a);
+ ALTER TABLE cluster_tab OWNER TO regress_cluster_conflict;
+}
+
+teardown
+{
+ DROP TABLE cluster_tab;
+ DROP ROLE regress_cluster_conflict;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_lock { LOCK cluster_tab IN SHARE UPDATE EXCLUSIVE MODE; }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_auth { SET ROLE regress_cluster_conflict; }
+step s2_cluster { CLUSTER cluster_tab USING cluster_ind; }
+step s2_reset { RESET ROLE; }
+
+# The role has privileges to cluster the table, CLUSTER will block if
+# another session holds a lock on the table and succeed in all cases.
+permutation s1_begin s1_lock s2_auth s2_cluster s1_commit s2_reset
+permutation s1_begin s2_auth s1_lock s2_cluster s1_commit s2_reset
diff --git a/src/test/isolation/specs/create-trigger.spec b/src/test/isolation/specs/create-trigger.spec
new file mode 100644
index 0000000..9d4710c
--- /dev/null
+++ b/src/test/isolation/specs/create-trigger.spec
@@ -0,0 +1,54 @@
+# CREATE TRIGGER - Add trigger with concurrent reads
+#
+# CREATE TRIGGER uses ShareRowExclusiveLock so we mix writes with it
+# to see what works or waits.
+
+setup
+{
+ CREATE TABLE a (i int);
+ CREATE FUNCTION f() RETURNS TRIGGER LANGUAGE plpgsql AS 'BEGIN RETURN NULL; END;';
+ INSERT INTO a VALUES (0), (1), (2), (3);
+}
+
+teardown
+{
+ DROP TABLE a;
+ DROP FUNCTION f();
+}
+
+session s1
+step s1a { BEGIN; }
+step s1b { CREATE TRIGGER t AFTER UPDATE ON a EXECUTE PROCEDURE f(); }
+step s1c { COMMIT; }
+
+session s2
+step s2a { BEGIN; }
+step s2b { SELECT * FROM a WHERE i = 1 FOR UPDATE; }
+step s2c { UPDATE a SET i = 4 WHERE i = 3; }
+step s2d { COMMIT; }
+
+permutation s1a s1b s1c s2a s2b s2c s2d
+permutation s1a s1b s2a s1c s2b s2c s2d
+permutation s1a s1b s2a s2b s1c s2c s2d
+permutation s1a s1b s2a s2b s2c s1c s2d
+permutation s1a s2a s1b s1c s2b s2c s2d
+permutation s1a s2a s1b s2b s1c s2c s2d
+permutation s1a s2a s1b s2b s2c s1c s2d
+permutation s1a s2a s2b s1b s1c s2c s2d
+permutation s1a s2a s2b s1b s2c s1c s2d
+permutation s1a s2a s2b s2c s1b s2d s1c
+permutation s1a s2a s2b s2c s2d s1b s1c
+permutation s2a s1a s1b s1c s2b s2c s2d
+permutation s2a s1a s1b s2b s1c s2c s2d
+permutation s2a s1a s1b s2b s2c s1c s2d
+permutation s2a s1a s2b s1b s1c s2c s2d
+permutation s2a s1a s2b s1b s2c s1c s2d
+permutation s2a s1a s2b s2c s1b s2d s1c
+permutation s2a s1a s2b s2c s2d s1b s1c
+permutation s2a s2b s1a s1b s1c s2c s2d
+permutation s2a s2b s1a s1b s2c s1c s2d
+permutation s2a s2b s1a s2c s1b s2d s1c
+permutation s2a s2b s1a s2c s2d s1b s1c
+permutation s2a s2b s2c s1a s1b s2d s1c
+permutation s2a s2b s2c s1a s2d s1b s1c
+permutation s2a s2b s2c s2d s1a s1b s1c
diff --git a/src/test/isolation/specs/deadlock-hard.spec b/src/test/isolation/specs/deadlock-hard.spec
new file mode 100644
index 0000000..60bedca
--- /dev/null
+++ b/src/test/isolation/specs/deadlock-hard.spec
@@ -0,0 +1,79 @@
+# This is a straightforward deadlock scenario. Since it involves more than
+# two processes, the main lock detector will find the problem and rollback
+# the session that first discovers it. Set deadlock_timeout in each session
+# so that it's predictable which session fails.
+
+setup
+{
+ CREATE TABLE a1 ();
+ CREATE TABLE a2 ();
+ CREATE TABLE a3 ();
+ CREATE TABLE a4 ();
+ CREATE TABLE a5 ();
+ CREATE TABLE a6 ();
+ CREATE TABLE a7 ();
+ CREATE TABLE a8 ();
+}
+
+teardown
+{
+ DROP TABLE a1, a2, a3, a4, a5, a6, a7, a8;
+}
+
+session s1
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s1a1 { LOCK TABLE a1; }
+step s1a2 { LOCK TABLE a2; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s2a2 { LOCK TABLE a2; }
+step s2a3 { LOCK TABLE a3; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s3a3 { LOCK TABLE a3; }
+step s3a4 { LOCK TABLE a4; }
+step s3c { COMMIT; }
+
+session s4
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s4a4 { LOCK TABLE a4; }
+step s4a5 { LOCK TABLE a5; }
+step s4c { COMMIT; }
+
+session s5
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s5a5 { LOCK TABLE a5; }
+step s5a6 { LOCK TABLE a6; }
+step s5c { COMMIT; }
+
+session s6
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s6a6 { LOCK TABLE a6; }
+step s6a7 { LOCK TABLE a7; }
+step s6c { COMMIT; }
+
+session s7
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s7a7 { LOCK TABLE a7; }
+step s7a8 { LOCK TABLE a8; }
+step s7c { COMMIT; }
+
+session s8
+setup { BEGIN; SET deadlock_timeout = '10ms'; }
+step s8a8 { LOCK TABLE a8; }
+step s8a1 { LOCK TABLE a1; }
+step s8c { COMMIT; }
+
+# Note: when s8a1 detects the deadlock and fails, s7a8 is released, making
+# it timing-dependent which query completion is received first by the tester.
+# To ensure output stability, add a blocking mark to force s8a1's completion
+# to be reported first. There is a second timing dependency, too: the tester
+# might or might not observe s8a1 during its short lock wait state. Apply a
+# dummy blocking mark to s8a1 to ensure it will be reported as "waiting"
+# regardless of that.
+
+permutation s1a1 s2a2 s3a3 s4a4 s5a5 s6a6 s7a7 s8a8 s1a2 s2a3 s3a4 s4a5 s5a6 s6a7 s7a8(s8a1) s8a1(*) s8c s7c s6c s5c s4c s3c s2c s1c
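
[Editor's note, not part of the patch: the shape of the failure, reduced to two sessions.

    -- session A
    BEGIN; LOCK TABLE a1;
    -- session B
    BEGIN; LOCK TABLE a2;
    -- session A
    LOCK TABLE a2;   -- waits for session B
    -- session B
    LOCK TABLE a1;   -- closes the cycle; once deadlock_timeout expires the
                     -- detector aborts one session with "ERROR:  deadlock detected"

The eight-session version above stretches the cycle so that only s8, with its 10ms deadlock_timeout, runs the detector and is the session rolled back.]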
diff --git a/src/test/isolation/specs/deadlock-parallel.spec b/src/test/isolation/specs/deadlock-parallel.spec
new file mode 100644
index 0000000..2016bcd
--- /dev/null
+++ b/src/test/isolation/specs/deadlock-parallel.spec
@@ -0,0 +1,113 @@
+# Test deadlock resolution with parallel process groups.
+
+# It's fairly hard to get parallel worker processes to block on locks,
+# since generally they don't want any locks their leader didn't already
+# take. We cheat like mad here by making a function that takes a lock,
+# and is incorrectly marked parallel-safe so that it can execute in a worker.
+
+# Note that we explicitly override any global settings of isolation level
+# or debug_parallel_query, to ensure we're testing what we intend to.
+
+# Otherwise, this is morally equivalent to deadlock-soft.spec:
+# Four-process deadlock with two hard edges and two soft edges.
+# d2 waits for e1 (soft edge), e1 waits for d1 (hard edge),
+# d1 waits for e2 (soft edge), e2 waits for d2 (hard edge).
+# The deadlock detector resolves the deadlock by reversing the d1-e2 edge,
+# unblocking d1.
+
+# However ... it's not actually that well-defined whether the deadlock
+# detector will prefer to unblock d1 or d2. It depends on which backend
+# is first to run DeadLockCheck after the deadlock condition is created:
+# that backend will search outwards from its own wait condition, and will
+# first find a loop involving the *other* lock. We encourage that to be
+# one of the d2a1 parallel workers, which will therefore unblock d1a2
+# workers, by setting a shorter deadlock_timeout in session d2. But on
+# slow machines, one or more d1a2 workers may not yet have reached their
+# lock waits, so that they're not unblocked by the first DeadLockCheck.
+# The next DeadLockCheck may choose to unblock the d2a1 workers instead,
+# which would allow d2a1 to complete before d1a2, causing the test to
+# freeze up because isolationtester isn't expecting that completion order.
+# (In effect, we have an undetectable deadlock because d2 is waiting for
+# d1's completion, but on the client side.) To fix this, introduce an
+# additional lock (advisory lock 3), which is initially taken by d1 and
+# then d2a1 will wait for it after completing the main part of the test.
+# In this way, the deadlock detector can see that d1 must be completed
+# first, regardless of timing.
+
+setup
+{
+ create function lock_share(int,int) returns int language sql as
+ 'select pg_advisory_xact_lock_shared($1); select 1;' parallel safe;
+
+ create function lock_excl(int,int) returns int language sql as
+ 'select pg_advisory_xact_lock($1); select 1;' parallel safe;
+
+ create table bigt as select x from generate_series(1, 10000) x;
+ analyze bigt;
+}
+
+teardown
+{
+ drop function lock_share(int,int);
+ drop function lock_excl(int,int);
+ drop table bigt;
+}
+
+session d1
+setup { BEGIN isolation level repeatable read;
+ SET debug_parallel_query = off;
+ SET deadlock_timeout = '10s';
+}
+# these locks will be taken in the leader, so they will persist:
+step d1a1 { SELECT lock_share(1,x), lock_excl(3,x) FROM bigt LIMIT 1; }
+# this causes all the parallel workers to take locks:
+step d1a2 { SET debug_parallel_query = on;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ SET min_parallel_table_scan_size = 0;
+ SET parallel_leader_participation = off;
+ SET max_parallel_workers_per_gather = 3;
+ SELECT sum(lock_share(2,x)) FROM bigt; }
+step d1c { COMMIT; }
+
+session d2
+setup { BEGIN isolation level repeatable read;
+ SET debug_parallel_query = off;
+ SET deadlock_timeout = '10ms';
+}
+# this lock will be taken in the leader, so it will persist:
+step d2a2 { select lock_share(2,x) FROM bigt LIMIT 1; }
+# this causes all the parallel workers to take locks;
+# after which, make the leader take lock 3 to prevent client-driven deadlock
+step d2a1 { SET debug_parallel_query = on;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ SET min_parallel_table_scan_size = 0;
+ SET parallel_leader_participation = off;
+ SET max_parallel_workers_per_gather = 3;
+ SELECT sum(lock_share(1,x)) FROM bigt;
+ SET debug_parallel_query = off;
+ RESET parallel_setup_cost;
+ RESET parallel_tuple_cost;
+ SELECT lock_share(3,x) FROM bigt LIMIT 1; }
+step d2c { COMMIT; }
+
+session e1
+setup { BEGIN isolation level repeatable read;
+ SET debug_parallel_query = on;
+ SET deadlock_timeout = '10s';
+}
+# this lock will be taken in a parallel worker, but we don't need it to persist
+step e1l { SELECT lock_excl(1,x) FROM bigt LIMIT 1; }
+step e1c { COMMIT; }
+
+session e2
+setup { BEGIN isolation level repeatable read;
+ SET debug_parallel_query = on;
+ SET deadlock_timeout = '10s';
+}
+# this lock will be taken in a parallel worker, but we don't need it to persist
+step e2l { SELECT lock_excl(2,x) FROM bigt LIMIT 1; }
+step e2c { COMMIT; }
+
+permutation d1a1 d2a2 e1l e2l d1a2 d2a1 d1c e1c d2c e2c
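
[Editor's note, not part of the patch: the trick the setup relies on can be inspected in the catalog, assuming the functions created above.

    SELECT proname, proparallel
    FROM pg_proc WHERE proname IN ('lock_share', 'lock_excl');
    -- proparallel: 's' = safe, 'r' = restricted, 'u' = unsafe

Declaring a lock-taking function PARALLEL SAFE is normally incorrect; the spec does it deliberately so that parallel workers, not just the leader, end up waiting on the advisory locks.]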
diff --git a/src/test/isolation/specs/deadlock-simple.spec b/src/test/isolation/specs/deadlock-simple.spec
new file mode 100644
index 0000000..3086dc7
--- /dev/null
+++ b/src/test/isolation/specs/deadlock-simple.spec
@@ -0,0 +1,29 @@
+# The deadlock detector has a special case for "simple" deadlocks. A simple
+# deadlock occurs when we attempt a lock upgrade while another process waits
+# for a lock upgrade on the same object; and the sought locks conflict with
+# those already held, so that neither process can complete its upgrade until
+# the other releases locks. Test this scenario.
+
+setup
+{
+ CREATE TABLE a1 ();
+}
+
+teardown
+{
+ DROP TABLE a1;
+}
+
+session s1
+setup { BEGIN; }
+step s1as { LOCK TABLE a1 IN ACCESS SHARE MODE; }
+step s1ae { LOCK TABLE a1 IN ACCESS EXCLUSIVE MODE; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2as { LOCK TABLE a1 IN ACCESS SHARE MODE; }
+step s2ae { LOCK TABLE a1 IN ACCESS EXCLUSIVE MODE; }
+step s2c { COMMIT; }
+
+permutation s1as s2as s1ae s2ae s1c s2c
diff --git a/src/test/isolation/specs/deadlock-soft-2.spec b/src/test/isolation/specs/deadlock-soft-2.spec
new file mode 100644
index 0000000..26d9c62
--- /dev/null
+++ b/src/test/isolation/specs/deadlock-soft-2.spec
@@ -0,0 +1,43 @@
+# Soft deadlock requiring reversal of multiple wait-edges. s1 must
+# jump over both s3 and s4 and acquire the lock on a2 immediately,
+# since s3 and s4 are hard-blocked on a1.
+
+setup
+{
+ CREATE TABLE a1 ();
+ CREATE TABLE a2 ();
+}
+
+teardown
+{
+ DROP TABLE a1, a2;
+}
+
+session s1
+setup { BEGIN; SET deadlock_timeout = '10ms'; }
+step s1a { LOCK TABLE a1 IN SHARE UPDATE EXCLUSIVE MODE; }
+step s1b { LOCK TABLE a2 IN SHARE UPDATE EXCLUSIVE MODE; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s2a { LOCK TABLE a2 IN ACCESS SHARE MODE; }
+step s2b { LOCK TABLE a1 IN SHARE UPDATE EXCLUSIVE MODE; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s3a { LOCK TABLE a2 IN ACCESS EXCLUSIVE MODE; }
+step s3c { COMMIT; }
+
+session s4
+setup { BEGIN; SET deadlock_timeout = '100s'; }
+step s4a { LOCK TABLE a2 IN ACCESS EXCLUSIVE MODE; }
+step s4c { COMMIT; }
+
+# The expected output for this test assumes that isolationtester will
+# detect step s1b as waiting before the deadlock detector runs and
+# releases s1 from its blocked state. To ensure that happens even in
+# very slow (debug_discard_caches) cases, apply a (*) annotation.
+
+permutation s1a s2a s2b s3a s4a s1b(*) s1c s2c s3c s4c
diff --git a/src/test/isolation/specs/deadlock-soft.spec b/src/test/isolation/specs/deadlock-soft.spec
new file mode 100644
index 0000000..bc9c6a7
--- /dev/null
+++ b/src/test/isolation/specs/deadlock-soft.spec
@@ -0,0 +1,40 @@
+# Four-process deadlock with two hard edges and two soft edges.
+# d2 waits for e1 (soft edge), e1 waits for d1 (hard edge),
+# d1 waits for e2 (soft edge), e2 waits for d2 (hard edge).
+# The deadlock detector resolves the deadlock by reversing the d1-e2 edge,
+# unblocking d1.
+
+setup
+{
+ CREATE TABLE a1 ();
+ CREATE TABLE a2 ();
+}
+
+teardown
+{
+ DROP TABLE a1, a2;
+}
+
+session d1
+setup { BEGIN; SET deadlock_timeout = '10s'; }
+step d1a1 { LOCK TABLE a1 IN ACCESS SHARE MODE; }
+step d1a2 { LOCK TABLE a2 IN ACCESS SHARE MODE; }
+step d1c { COMMIT; }
+
+session d2
+setup { BEGIN; SET deadlock_timeout = '10ms'; }
+step d2a2 { LOCK TABLE a2 IN ACCESS SHARE MODE; }
+step d2a1 { LOCK TABLE a1 IN ACCESS SHARE MODE; }
+step d2c { COMMIT; }
+
+session e1
+setup { BEGIN; SET deadlock_timeout = '10s'; }
+step e1l { LOCK TABLE a1 IN ACCESS EXCLUSIVE MODE; }
+step e1c { COMMIT; }
+
+session e2
+setup { BEGIN; SET deadlock_timeout = '10s'; }
+step e2l { LOCK TABLE a2 IN ACCESS EXCLUSIVE MODE; }
+step e2c { COMMIT; }
+
+permutation d1a1 d2a2 e1l e2l d1a2 d2a1 d1c e1c d2c e2c
diff --git a/src/test/isolation/specs/delete-abort-savept-2.spec b/src/test/isolation/specs/delete-abort-savept-2.spec
new file mode 100644
index 0000000..65bd936
--- /dev/null
+++ b/src/test/isolation/specs/delete-abort-savept-2.spec
@@ -0,0 +1,34 @@
+# A funkier version of delete-abort-savept
+setup
+{
+ CREATE TABLE foo (
+ key INT PRIMARY KEY,
+ value INT
+ );
+
+ INSERT INTO foo VALUES (1, 1);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1l { SELECT * FROM foo FOR KEY SHARE; }
+step s1svp { SAVEPOINT f; }
+step s1d { SELECT * FROM foo FOR NO KEY UPDATE; }
+step s1r { ROLLBACK TO f; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2l { SELECT * FROM foo FOR UPDATE; }
+step s2l2 { SELECT * FROM foo FOR NO KEY UPDATE; }
+step s2c { COMMIT; }
+
+permutation s1l s1svp s1d s1r s2l s1c s2c
+permutation s1l s1svp s1d s2l s1r s1c s2c
+permutation s1l s1svp s1d s1r s2l2 s1c s2c
+permutation s1l s1svp s1d s2l2 s1r s1c s2c
diff --git a/src/test/isolation/specs/delete-abort-savept.spec b/src/test/isolation/specs/delete-abort-savept.spec
new file mode 100644
index 0000000..498ffed
--- /dev/null
+++ b/src/test/isolation/specs/delete-abort-savept.spec
@@ -0,0 +1,37 @@
+# After rolling back a subtransaction that upgraded a lock, the previously
+# held lock should still be held.
+setup
+{
+ CREATE TABLE foo (
+ key INT PRIMARY KEY,
+ value INT
+ );
+
+ INSERT INTO foo VALUES (1, 1);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1l { SELECT * FROM foo FOR KEY SHARE; }
+step s1svp { SAVEPOINT f; }
+step s1d { DELETE FROM foo; }
+step s1r { ROLLBACK TO f; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2l { SELECT * FROM foo FOR UPDATE; }
+step s2c { COMMIT; }
+
+permutation s1l s1svp s1d s1r s1c s2l s2c
+permutation s1l s1svp s1d s1r s2l s1c s2c
+permutation s1l s1svp s1d s2l s1r s1c s2c
+permutation s1l s1svp s2l s1d s1r s1c s2c
+permutation s1l s2l s1svp s1d s1r s1c s2c
+permutation s2l s1l s2c s1svp s1d s1r s1c
+permutation s2l s2c s1l s1svp s1d s1r s1c
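
[Editor's note, not part of the patch: condensed to a single session, the property under test is the following.

    BEGIN;
    SELECT * FROM foo FOR KEY SHARE;   -- take the weakest row lock
    SAVEPOINT f;
    DELETE FROM foo;                   -- the subtransaction escalates the tuple lock
    ROLLBACK TO f;                     -- the escalation is undone ...
    COMMIT;                            -- ... but FOR KEY SHARE stays held until here

The permutations interleave s2's FOR UPDATE, which does conflict with FOR KEY SHARE, at every point to verify that the surviving lock is still respected.]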
diff --git a/src/test/isolation/specs/detach-partition-concurrently-1.spec b/src/test/isolation/specs/detach-partition-concurrently-1.spec
new file mode 100644
index 0000000..835fe89
--- /dev/null
+++ b/src/test/isolation/specs/detach-partition-concurrently-1.spec
@@ -0,0 +1,69 @@
+# Test that detach partition concurrently makes the partition invisible at the
+# correct time.
+
+setup
+{
+ DROP TABLE IF EXISTS d_listp, d_listp1, d_listp2;
+ CREATE TABLE d_listp (a int) PARTITION BY LIST(a);
+ CREATE TABLE d_listp1 PARTITION OF d_listp FOR VALUES IN (1);
+ CREATE TABLE d_listp2 PARTITION OF d_listp FOR VALUES IN (2);
+ INSERT INTO d_listp VALUES (1),(2);
+}
+
+teardown {
+ DROP TABLE IF EXISTS d_listp, d_listp2, d_listp_foobar;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1brr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s1s { SELECT * FROM d_listp; }
+step s1ins { INSERT INTO d_listp VALUES (1); }
+step s1ins2 { INSERT INTO d_listp VALUES (2); }
+step s1prep { PREPARE f(int) AS INSERT INTO d_listp VALUES ($1); }
+step s1prep1 { PREPARE f(int) AS INSERT INTO d_listp VALUES (1); }
+step s1prep2 { PREPARE f(int) AS INSERT INTO d_listp VALUES (2); }
+step s1exec1 { EXECUTE f(1); }
+step s1exec2 { EXECUTE f(2); }
+step s1dealloc { DEALLOCATE f; }
+step s1c { COMMIT; }
+
+session s2
+step s2detach { ALTER TABLE d_listp DETACH PARTITION d_listp2 CONCURRENTLY; }
+step s2drop { DROP TABLE d_listp2; }
+
+session s3
+step s3s { SELECT * FROM d_listp; }
+step s3i { SELECT relpartbound IS NULL FROM pg_class where relname = 'd_listp2'; }
+step s3ins2 { INSERT INTO d_listp VALUES (2); }
+
+# The transaction that detaches hangs until it sees any older transaction
+# terminate, as does anybody else.
+permutation s1b s1s s2detach s1s s1c s1s
+
+# relpartbound remains set until s1 commits
+# XXX this could be timing dependent :-(
+permutation s1b s1s s2detach s1s s3s s3i s1c s3i s2drop s1s
+
+# In read-committed mode, the partition disappears from view of concurrent
+# transactions immediately. But if a write lock is held, then the detach
+# has to wait.
+permutation s1b s1s s2detach s1ins s1s s1c
+permutation s1b s1s s1ins2 s2detach s1ins s1s s1c
+
+# In repeatable-read mode, the partition remains visible until commit even
+# if the to-be-detached partition is not locked for write.
+permutation s1brr s1s s2detach s1ins s1s s1c
+permutation s1brr s1s s2detach s1s s1c
+
+# Another process trying to acquire a write lock will be blocked behind the
+# detacher
+permutation s1b s1ins2 s2detach s3ins2 s1c
+
+# a prepared query is not blocked
+permutation s1brr s1prep s1s s2detach s1s s1exec1 s3s s1dealloc s1c
+permutation s1brr s1prep s1exec2 s2detach s1s s1exec2 s3s s1c s1dealloc
+permutation s1brr s1prep s1s s2detach s1s s1exec2 s1c s1dealloc
+permutation s1brr s1prep s2detach s1s s1exec2 s1c s1dealloc
+permutation s1brr s1prep1 s2detach s1s s1exec2 s1c s1dealloc
+permutation s1brr s1prep2 s2detach s1s s1exec2 s1c s1dealloc
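
[Editor's note, not part of the patch: the catalog state of a partially-detached partition can be seen directly, using the tables from the setup above.

    ALTER TABLE d_listp DETACH PARTITION d_listp2 CONCURRENTLY;
    -- from another session, while the detach is still waiting or after it was cancelled:
    SELECT inhrelid::regclass AS partition, inhdetachpending
    FROM pg_inherits WHERE inhparent = 'd_listp'::regclass;
    -- a detach interrupted between its two transactions can be completed with
    -- ALTER TABLE d_listp DETACH PARTITION d_listp2 FINALIZE;

DETACH CONCURRENTLY runs as two transactions, which is why a concurrent snapshot can still see the partition until the second transaction commits.]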
diff --git a/src/test/isolation/specs/detach-partition-concurrently-2.spec b/src/test/isolation/specs/detach-partition-concurrently-2.spec
new file mode 100644
index 0000000..fa767ea
--- /dev/null
+++ b/src/test/isolation/specs/detach-partition-concurrently-2.spec
@@ -0,0 +1,41 @@
+# Test that detach partition concurrently makes the partition safe
+# for foreign keys that reference it.
+
+setup
+{
+ DROP TABLE IF EXISTS d_lp_fk, d_lp_fk_1, d_lp_fk_2, d_lp_fk_r;
+
+ CREATE TABLE d_lp_fk (a int PRIMARY KEY) PARTITION BY LIST(a);
+ CREATE TABLE d_lp_fk_1 PARTITION OF d_lp_fk FOR VALUES IN (1);
+ CREATE TABLE d_lp_fk_2 PARTITION OF d_lp_fk FOR VALUES IN (2);
+ INSERT INTO d_lp_fk VALUES (1), (2);
+
+ CREATE TABLE d_lp_fk_r (a int references d_lp_fk);
+}
+
+teardown { DROP TABLE IF EXISTS d_lp_fk, d_lp_fk_1, d_lp_fk_2, d_lp_fk_r; }
+
+session s1
+step s1b { BEGIN; }
+step s1s { SELECT * FROM d_lp_fk; }
+step s1c { COMMIT; }
+
+session s2
+step s2d { ALTER TABLE d_lp_fk DETACH PARTITION d_lp_fk_1 CONCURRENTLY; }
+
+session s3
+step s3b { BEGIN; }
+step s3i1 { INSERT INTO d_lp_fk_r VALUES (1); }
+step s3i2 { INSERT INTO d_lp_fk_r VALUES (2); }
+step s3c { COMMIT; }
+
+# The transaction that detaches hangs until it sees any older transaction
+# terminate.
+permutation s1b s1s s2d s3i1 s1c
+permutation s1b s1s s2d s3i2 s3i2 s1c
+
+permutation s1b s1s s3i1 s2d s1c
+permutation s1b s1s s3i2 s2d s1c
+
+# what if s3 has an uncommitted insertion?
+permutation s1b s1s s3b s2d s3i1 s1c s3c
diff --git a/src/test/isolation/specs/detach-partition-concurrently-3.spec b/src/test/isolation/specs/detach-partition-concurrently-3.spec
new file mode 100644
index 0000000..31aa308
--- /dev/null
+++ b/src/test/isolation/specs/detach-partition-concurrently-3.spec
@@ -0,0 +1,86 @@
+# Try various things to happen to a partition with an incomplete detach
+#
+# Note: When using "s1cancel", mark the target step (the one to be canceled)
+# as blocking "s1cancel". This ensures consistent reporting regardless of
+# whether "s1cancel" finishes before or after the other step reports failure.
+# Also, ensure the step after "s1cancel" is also an s1 step (use "s1noop" if
+# necessary). This ensures we won't move on to the next step until the cancel
+# is complete.
+
+setup
+{
+ CREATE TABLE d3_listp (a int) PARTITION BY LIST(a);
+ CREATE TABLE d3_listp1 PARTITION OF d3_listp FOR VALUES IN (1);
+ CREATE TABLE d3_listp2 PARTITION OF d3_listp FOR VALUES IN (2);
+ CREATE TABLE d3_pid (pid int);
+ INSERT INTO d3_listp VALUES (1);
+}
+
+teardown {
+ DROP TABLE IF EXISTS d3_listp, d3_listp1, d3_listp2, d3_pid;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1brr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s1s { SELECT * FROM d3_listp; }
+step s1spart { SELECT * FROM d3_listp1; }
+step s1cancel { SELECT pg_cancel_backend(pid) FROM d3_pid; }
+step s1noop { }
+step s1c { COMMIT; }
+step s1alter { ALTER TABLE d3_listp1 ALTER a DROP NOT NULL; }
+step s1insert { INSERT INTO d3_listp VALUES (1); }
+step s1insertpart { INSERT INTO d3_listp1 VALUES (1); }
+step s1drop { DROP TABLE d3_listp; }
+step s1droppart { DROP TABLE d3_listp1; }
+step s1trunc { TRUNCATE TABLE d3_listp; }
+step s1list { SELECT relname FROM pg_catalog.pg_class
+ WHERE relname LIKE 'd3_listp%' ORDER BY 1; }
+step s1describe { SELECT 'd3_listp' AS root, * FROM pg_partition_tree('d3_listp')
+ UNION ALL SELECT 'd3_listp1', * FROM pg_partition_tree('d3_listp1'); }
+
+session s2
+step s2begin { BEGIN; }
+step s2snitch { INSERT INTO d3_pid SELECT pg_backend_pid(); }
+step s2detach { ALTER TABLE d3_listp DETACH PARTITION d3_listp1 CONCURRENTLY; }
+step s2detach2 { ALTER TABLE d3_listp DETACH PARTITION d3_listp2 CONCURRENTLY; }
+step s2detachfinal { ALTER TABLE d3_listp DETACH PARTITION d3_listp1 FINALIZE; }
+step s2drop { DROP TABLE d3_listp1; }
+step s2commit { COMMIT; }
+
+# Try various things while the partition is in "being detached" state, with
+# no session waiting.
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1describe s1alter
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1insert s1c
+permutation s2snitch s1brr s1s s2detach s1cancel(s2detach) s1insert s1c s1spart
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1insertpart
+
+# Test partition descriptor caching
+permutation s2snitch s1b s1s s2detach2 s1cancel(s2detach2) s1c s1brr s1insert s1s s1insert s1c
+permutation s2snitch s1b s1s s2detach2 s1cancel(s2detach2) s1c s1brr s1s s1insert s1s s1c
+
+# "drop" here does both tables
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1drop s1list
+# "truncate" only does parent, not partition
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1trunc s1spart
+
+# If a partition pending detach exists, we cannot drop another one
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1noop s2detach2 s1c
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1noop s2detachfinal s1c s2detach2
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1droppart s2detach2
+
+# When a partition with incomplete detach is dropped, we grab lock on parent too.
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s2begin s2drop s1s s2commit
+
+# Partially detach, then select and try to complete the detach. Reading
+# from partition blocks (AEL is required on partition); reading from parent
+# does not block.
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1b s1spart s2detachfinal s1c
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1b s1s s2detachfinal s1c
+
+# DETACH FINALIZE in a transaction block. No insert/select on the partition
+# is allowed concurrently with that.
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s1b s1spart s2detachfinal s1c
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s2begin s2detachfinal s2commit
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s2begin s2detachfinal s1spart s2commit
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1c s2begin s2detachfinal s1insertpart s2commit
diff --git a/src/test/isolation/specs/detach-partition-concurrently-4.spec b/src/test/isolation/specs/detach-partition-concurrently-4.spec
new file mode 100644
index 0000000..2c02cae
--- /dev/null
+++ b/src/test/isolation/specs/detach-partition-concurrently-4.spec
@@ -0,0 +1,83 @@
+# This test exercises the behavior of foreign keys in the face of concurrent
+# detach of partitions in the referenced table.
+# (The cases where the detaching transaction is cancelled are interesting
+# because the locking situation is completely different. I didn't verify
+# that keeping both variants adds any extra coverage.)
+#
+# Note: When using "s1cancel", mark the target step (the one to be canceled)
+# as blocking "s1cancel". This ensures consistent reporting regardless of
+# whether "s1cancel" finishes before or after the other step reports failure.
+# Also, ensure the step after "s1cancel" is also an s1 step (use "s1noop" if
+# necessary). This ensures we won't move on to the next step until the cancel
+# is complete.
+
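+# As a rough psql sketch of the first scenario below (two sessions, using
+# the tables created in this spec; the exact error text is not asserted here
+# and may vary):
+#
+#   -- session 1:
+#   BEGIN;
+#   SELECT * FROM d4_primary;
+#   -- session 2 (blocks behind session 1, then gets cancelled, leaving
+#   -- d4_primary1 in "detach pending" state):
+#   ALTER TABLE d4_primary DETACH PARTITION d4_primary1 CONCURRENTLY;
+#   -- session 1: the RI lookup no longer sees (1), which exists only in the
+#   -- pending-detach partition, so this fails with an FK violation:
+#   INSERT INTO d4_fk VALUES (1);
+#   COMMIT;
+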
+setup {
+ drop table if exists d4_primary, d4_primary1, d4_fk, d4_pid;
+ create table d4_primary (a int primary key) partition by list (a);
+ create table d4_primary1 partition of d4_primary for values in (1);
+ create table d4_primary2 partition of d4_primary for values in (2);
+ insert into d4_primary values (1);
+ insert into d4_primary values (2);
+ create table d4_fk (a int references d4_primary);
+ insert into d4_fk values (2);
+ create table d4_pid (pid int);
+}
+
+session s1
+step s1b { begin; }
+step s1brr { begin isolation level repeatable read; }
+step s1s { select * from d4_primary; }
+step s1cancel { select pg_cancel_backend(pid) from d4_pid; }
+step s1noop { }
+step s1insert { insert into d4_fk values (1); }
+step s1c { commit; }
+step s1declare { declare f cursor for select * from d4_primary; }
+step s1declare2 { declare f cursor for select * from d4_fk where a = 2; }
+step s1fetchall { fetch all from f; }
+step s1fetchone { fetch 1 from f; }
+step s1updcur { update d4_fk set a = 1 where current of f; }
+step s1svpt { savepoint f; }
+step s1rollback { rollback to f; }
+
+session s2
+step s2snitch { insert into d4_pid select pg_backend_pid(); }
+step s2detach { alter table d4_primary detach partition d4_primary1 concurrently; }
+
+session s3
+step s3brr { begin isolation level repeatable read; }
+step s3insert { insert into d4_fk values (1); }
+step s3commit { commit; }
+step s3vacfreeze { vacuum freeze pg_catalog.pg_inherits; }
+
+# Inserting a referencing row whose key exists only in a partially
+# detached partition is rejected
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1insert s1c
+permutation s2snitch s1b s1s s2detach s1insert s1c
+# ... even under REPEATABLE READ mode.
+permutation s2snitch s1brr s1s s2detach s1cancel(s2detach) s1insert s1c
+permutation s2snitch s1brr s1s s2detach s1insert s1c
+
+# If you read the referenced table using a cursor, you can see a row that the
+# RI query does not see.
+permutation s2snitch s1b s1declare s2detach s1cancel(s2detach) s1fetchall s1insert s1c
+permutation s2snitch s1b s1declare s2detach s1fetchall s1insert s1c
+permutation s2snitch s1b s1declare s2detach s1cancel(s2detach) s1svpt s1insert s1rollback s1fetchall s1c
+permutation s2snitch s1b s1declare s2detach s1svpt s1insert s1rollback s1fetchall s1c
+permutation s2snitch s1b s2detach s1declare s1cancel(s2detach) s1fetchall s1insert s1c
+permutation s2snitch s1b s2detach s1declare s1fetchall s1insert s1c
+permutation s2snitch s1b s2detach s1declare s1cancel(s2detach) s1svpt s1insert s1rollback s1fetchall s1c
+permutation s2snitch s1b s2detach s1declare s1svpt s1insert s1rollback s1fetchall s1c
+
+# Creating the referencing row using a cursor
+permutation s2snitch s1brr s1declare2 s1fetchone s2detach s1cancel(s2detach) s1updcur s1c
+permutation s2snitch s1brr s1declare2 s1fetchone s2detach s1updcur s1c
+permutation s2snitch s1brr s1declare2 s1fetchone s1updcur s2detach s1c
+
+# Try reading the table from an independent session.
+permutation s2snitch s1b s1s s2detach s3insert s1c
+permutation s2snitch s1b s1s s2detach s3brr s3insert s3commit s1cancel(s2detach) s1c
+permutation s2snitch s1b s1s s2detach s3brr s3insert s3commit s1c
+
+# Try one where we VACUUM FREEZE pg_inherits (to verify that xmin change is
+# handled correctly).
+permutation s2snitch s1brr s1s s2detach s1cancel(s2detach) s1noop s3vacfreeze s1s s1insert s1c
+permutation s2snitch s1b s1s s2detach s1cancel(s2detach) s1noop s3vacfreeze s1s s1insert s1c
diff --git a/src/test/isolation/specs/drop-index-concurrently-1.spec b/src/test/isolation/specs/drop-index-concurrently-1.spec
new file mode 100644
index 0000000..a57a024
--- /dev/null
+++ b/src/test/isolation/specs/drop-index-concurrently-1.spec
@@ -0,0 +1,43 @@
+# DROP INDEX CONCURRENTLY
+#
+# This test shows that concurrent write behaviour works correctly: the
+# expected output is 2 rows at the READ COMMITTED and READ UNCOMMITTED
+# transaction isolation levels, and 1 row at the higher transaction
+# isolation levels. We verify this by checking the rows returned by an
+# index scan plan and a seq scan plan.
+#
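+# A minimal psql illustration of the row-count expectation (independent of
+# the DROP INDEX CONCURRENTLY itself; table and value as in this spec):
+#
+#   -- session 1:
+#   BEGIN;
+#   SELECT count(*) FROM test_dc WHERE data = 34;   -- 1
+#   -- session 2 (autocommits):
+#   INSERT INTO test_dc(data) SELECT * FROM generate_series(1, 100);
+#   -- session 1 again:
+#   SELECT count(*) FROM test_dc WHERE data = 34;
+#   -- READ COMMITTED / READ UNCOMMITTED: 2 (each statement takes a new snapshot)
+#   -- REPEATABLE READ / SERIALIZABLE: 1 (snapshot fixed at the first query)
+#   COMMIT;
+#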
+setup
+{
+ CREATE TABLE test_dc(id serial primary key, data int);
+ INSERT INTO test_dc(data) SELECT * FROM generate_series(1, 100);
+ CREATE INDEX test_dc_data ON test_dc(data);
+}
+
+teardown
+{
+ DROP TABLE test_dc;
+}
+
+session s1
+step chkiso { SELECT (setting in ('read committed','read uncommitted')) AS is_read_committed FROM pg_settings WHERE name = 'default_transaction_isolation'; }
+step prepi { PREPARE getrow_idxscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; }
+step preps { PREPARE getrow_seqscan AS SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; }
+step begin { BEGIN; }
+step disableseq { SET enable_seqscan = false; }
+step explaini { EXPLAIN (COSTS OFF) EXECUTE getrow_idxscan; }
+step enableseq { SET enable_seqscan = true; }
+step explains { EXPLAIN (COSTS OFF) EXECUTE getrow_seqscan; }
+step selecti { EXECUTE getrow_idxscan; }
+step selects { EXECUTE getrow_seqscan; }
+step end { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step select2 { SELECT * FROM test_dc WHERE data = 34 ORDER BY id,data; }
+step insert2 { INSERT INTO test_dc(data) SELECT * FROM generate_series(1, 100); }
+step end2 { COMMIT; }
+
+session s3
+step drop { DROP INDEX CONCURRENTLY test_dc_data; }
+
+permutation chkiso prepi preps begin disableseq explaini enableseq explains select2 drop insert2 end2 selecti selects end
diff --git a/src/test/isolation/specs/eval-plan-qual-trigger.spec b/src/test/isolation/specs/eval-plan-qual-trigger.spec
new file mode 100644
index 0000000..b512edd
--- /dev/null
+++ b/src/test/isolation/specs/eval-plan-qual-trigger.spec
@@ -0,0 +1,410 @@
+setup
+{
+ CREATE TABLE trigtest(key text primary key, data text);
+
+ CREATE FUNCTION noisy_oper(p_comment text, p_a anynonarray, p_op text, p_b anynonarray)
+ RETURNS bool LANGUAGE plpgsql AS $body$
+ DECLARE
+ r bool;
+ BEGIN
+ EXECUTE format('SELECT $1 %s $2', p_op) INTO r USING p_a, p_b;
+ RAISE NOTICE '%: % % % % %: %', p_comment, pg_typeof(p_a), p_a, p_op, pg_typeof(p_b), p_b, r;
+ RETURN r;
+ END;$body$;
+
+ CREATE FUNCTION trig_report() RETURNS TRIGGER LANGUAGE plpgsql AS $body$
+ DECLARE
+ r_new text;
+ r_old text;
+ r_ret record;
+ BEGIN
+ -- In older releases it wasn't allowed to reference OLD/NEW
+ -- when not applicable for the trigger's TG_OP
+ IF TG_OP = 'INSERT' THEN
+ r_old = NULL;
+ r_new = NEW;
+ r_ret = NEW;
+ ELSIF TG_OP = 'DELETE' THEN
+ r_old = OLD;
+ r_new = NULL;
+ r_ret = OLD;
+ ELSIF TG_OP = 'UPDATE' THEN
+ r_old = OLD;
+ r_new = NEW;
+ r_ret = NEW;
+ END IF;
+
+ IF TG_WHEN = 'AFTER' THEN
+ r_ret = NULL;
+ END IF;
+
+ RAISE NOTICE 'trigger: name %; when: %; lev: %s; op: %; old: % new: %',
+ TG_NAME, TG_WHEN, TG_LEVEL, TG_OP, r_old, r_new;
+
+ RETURN r_ret;
+ END;
+ $body$;
+}
+
+teardown
+{
+ DROP TABLE trigtest;
+ DROP FUNCTION noisy_oper(text, anynonarray, text, anynonarray);
+ DROP FUNCTION trig_report();
+}
+
+
+session s0
+step s0_rep { SELECT * FROM trigtest ORDER BY key, data }
+
+session s1
+#setup { }
+step s1_b_rc { BEGIN ISOLATION LEVEL READ COMMITTED; SELECT 1; }
+step s1_b_rr { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT 1; }
+step s1_c { COMMIT; }
+step s1_r { ROLLBACK; }
+step s1_trig_rep_b_i { CREATE TRIGGER rep_b_i BEFORE INSERT ON trigtest FOR EACH ROW EXECUTE PROCEDURE trig_report(); }
+step s1_trig_rep_a_i { CREATE TRIGGER rep_a_i AFTER INSERT ON trigtest FOR EACH ROW EXECUTE PROCEDURE trig_report(); }
+step s1_trig_rep_b_u { CREATE TRIGGER rep_b_u BEFORE UPDATE ON trigtest FOR EACH ROW EXECUTE PROCEDURE trig_report(); }
+step s1_trig_rep_a_u { CREATE TRIGGER rep_a_u AFTER UPDATE ON trigtest FOR EACH ROW EXECUTE PROCEDURE trig_report(); }
+step s1_trig_rep_b_d { CREATE TRIGGER rep_b_d BEFORE DELETE ON trigtest FOR EACH ROW EXECUTE PROCEDURE trig_report(); }
+step s1_trig_rep_a_d { CREATE TRIGGER rep_a_d AFTER DELETE ON trigtest FOR EACH ROW EXECUTE PROCEDURE trig_report(); }
+step s1_ins_a { INSERT INTO trigtest VALUES ('key-a', 'val-a-s1') RETURNING *; }
+step s1_ins_b { INSERT INTO trigtest VALUES ('key-b', 'val-b-s1') RETURNING *; }
+step s1_ins_c { INSERT INTO trigtest VALUES ('key-c', 'val-c-s1') RETURNING *; }
+step s1_del_a {
+ DELETE FROM trigtest
+ WHERE
+ noisy_oper('upd', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *
+}
+step s1_del_b {
+ DELETE FROM trigtest
+ WHERE
+ noisy_oper('upd', key, '=', 'key-b') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *
+}
+step s1_upd_a_data {
+ UPDATE trigtest SET data = data || '-ups1'
+ WHERE
+ noisy_oper('upd', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+step s1_upd_b_data {
+ UPDATE trigtest SET data = data || '-ups1'
+ WHERE
+ noisy_oper('upd', key, '=', 'key-b') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+step s1_upd_a_tob {
+ UPDATE trigtest SET key = 'key-b', data = data || '-tobs1'
+ WHERE
+ noisy_oper('upk', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+
+session s2
+#setup { }
+step s2_b_rc { BEGIN ISOLATION LEVEL READ COMMITTED; SELECT 1; }
+step s2_b_rr { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT 1; }
+step s2_c { COMMIT; }
+step s2_r { ROLLBACK; }
+step s2_ins_a { INSERT INTO trigtest VALUES ('key-a', 'val-a-s2') RETURNING *; }
+step s2_del_a {
+ DELETE FROM trigtest
+ WHERE
+ noisy_oper('upd', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *
+}
+step s2_upd_a_data {
+ UPDATE trigtest SET data = data || '-ups2'
+ WHERE
+ noisy_oper('upd', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+step s2_upd_b_data {
+ UPDATE trigtest SET data = data || '-ups2'
+ WHERE
+ noisy_oper('upd', key, '=', 'key-b') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+step s2_upd_all_data {
+ UPDATE trigtest SET data = data || '-ups2'
+ WHERE
+ noisy_oper('upd', key, '<>', 'mismatch') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+step s2_upsert_a_data {
+ INSERT INTO trigtest VALUES ('key-a', 'val-a-upss2')
+ ON CONFLICT (key)
+ DO UPDATE SET data = trigtest.data || '-upserts2'
+ WHERE
+ noisy_oper('upd', trigtest.key, '=', 'key-a') AND
+ noisy_oper('upk', trigtest.data, '<>', 'mismatch')
+ RETURNING *;
+}
+
+session s3
+#setup { }
+step s3_b_rc { BEGIN ISOLATION LEVEL READ COMMITTED; SELECT 1; }
+step s3_c { COMMIT; }
+step s3_r { ROLLBACK; }
+step s3_del_a {
+ DELETE FROM trigtest
+ WHERE
+ noisy_oper('upd', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *
+}
+step s3_upd_a_data {
+ UPDATE trigtest SET data = data || '-ups3'
+ WHERE
+ noisy_oper('upd', key, '=', 'key-a') AND
+ noisy_oper('upk', data, '<>', 'mismatch')
+ RETURNING *;
+}
+
+### base case verifying that triggers see performed modifications
+# s1 updates, s1 commits, s2 updates
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s1_c s2_upd_a_data s2_c
+ s0_rep
+# s1 updates, s1 rolls back, s2 updates
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s1_r s2_upd_a_data s2_c
+ s0_rep
+# s1 updates, s1 commits, s2 deletes
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s1_c s2_del_a s2_c
+ s0_rep
+# s1 updates, s1 rolls back, s2 deletes
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s1_r s2_del_a s2_c
+ s0_rep
+
+### Verify EPQ is performed if necessary, and skipped if transaction rolled back
+# s1 updates, s2 updates, s1 commits, EPQ
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 updates, s2 updates, s1 rolls back, no EPQ
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 updates, s2 deletes, s1 commits, EPQ
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_del_a s1_c s2_c
+ s0_rep
+# s1 updates, s2 deletes, s1 rolls back, no EPQ
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_del_a s1_r s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 commits, EPQ
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_del_a s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 rolls back, no EPQ
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_del_a s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 inserts, s2 inserts, s1 commits, s2 inserts, unique conflict
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_a_i s1_trig_rep_a_d
+ s1_b_rc s2_b_rc
+ s1_ins_a s2_ins_a s1_c s2_c
+ s0_rep
+# s1 inserts, s2 inserts, s1 rolls back, s2 inserts, no unique conflict
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_a_i s1_trig_rep_a_d
+ s1_b_rc s2_b_rc
+ s1_ins_a s2_ins_a s1_r s2_c
+ s0_rep
+# s1 updates, s2 upserts, s1 commits, EPQ
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_upsert_a_data s1_c s2_c
+ s0_rep
+# s1 updates, s2 upserts, s1 rolls back, no EPQ
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_upsert_a_data s1_r s2_c
+ s0_rep
+# s1 inserts, s2 upserts, s1 commits
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_b_rc s2_b_rc
+ s1_ins_a s2_upsert_a_data s1_c s2_c
+ s0_rep
+# s1 inserts, s2 upserts, s1 rolls back
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_b_rc s2_b_rc
+ s1_ins_a s2_upsert_a_data s1_r s2_c
+ s0_rep
+# s1 inserts, s2 upserts, s1 updates, s1 commits, EPQ
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_b_rc s2_b_rc
+ s1_ins_a s1_upd_a_data s2_upsert_a_data s1_c s2_c
+ s0_rep
+# s1 inserts, s2 upserts, s1 updates, s1 rolls back, no EPQ
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_b_rc s2_b_rc
+ s1_ins_a s1_upd_a_data s2_upsert_a_data s1_r s2_c
+ s0_rep
+
+### Verify EPQ is performed if necessary, and skipped if transaction rolled back,
+### just without before triggers (for comparison, no additional row locks)
+# s1 updates, s2 updates, s1 commits, EPQ
+permutation s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 updates, s2 updates, s1 rolls back, no EPQ
+permutation s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 updates, s2 deletes, s1 commits, EPQ
+permutation s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_del_a s1_c s2_c
+ s0_rep
+# s1 updates, s2 deletes, s1 rolls back, no EPQ
+permutation s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_upd_a_data s2_del_a s1_r s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 commits, EPQ
+permutation s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_del_a s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 rolls back, no EPQ
+permutation s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_del_a s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 deletes, s2 deletes, s1 commits, EPQ
+permutation s1_trig_rep_a_d
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_del_a s2_del_a s1_c s2_c
+ s0_rep
+# s1 deletes, s2 deletes, s1 rolls back, no EPQ
+permutation s1_trig_rep_a_d
+ s1_ins_a s1_ins_b s1_b_rc s2_b_rc
+ s1_del_a s2_del_a s1_r s2_c
+ s0_rep
+
+### Verify that an update works correctly when it affects a row that has
+### been updated/deleted so that it no longer matches the WHERE clause
+# s1 updates to different key, s2 updates old key, s1 commits, EPQ failure should lead to no update
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_upd_a_tob s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 updates to different key, s2 updates old key, s1 rolls back, no EPQ failure
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_upd_a_tob s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 updates to a different key, s2 updates the new key, s1 commits; s2
+# will not see the tuple with the new key and will not block
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_upd_a_tob s2_upd_b_data s1_c s2_c
+ s0_rep
+# s1 updates to a different key, s2 updates all keys, s1 commits; s2
+# will not see the tuple with the old key, but blocks on the old version
+# and then follows the update chain
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_upd_a_tob s2_upd_all_data s1_c s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 commits, EPQ failure should lead to no update
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_del_a s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 rolls back, no EPQ failure
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_del_a s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 deletes, s2 deletes, s1 commits, EPQ failure should lead to no delete
+permutation s1_trig_rep_b_d s1_trig_rep_a_d
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_del_a s2_del_a s1_c s2_c
+ s0_rep
+# s1 deletes, s2 deletes, s1 rolls back, no EPQ failure
+permutation s1_trig_rep_b_d s1_trig_rep_a_d
+ s1_ins_a s1_ins_c s1_b_rc s2_b_rc
+ s1_del_a s2_del_a s1_r s2_c
+ s0_rep
+
+### Verify EPQ with more than two participants works
+## XXX: These tests are disabled; there is some potential for instability here that's not yet fully understood
+## s1 updates, s2 updates, s3 updates, s1 commits, s2 EPQ, s2 commits, s3 EPQ
+#permutation s1_trig_rep_b_u s1_trig_rep_a_u
+# s1_ins_a s1_ins_b s1_b_rc s2_b_rc s3_b_rc
+# s1_upd_a_data s2_upd_a_data s3_upd_a_data s1_c s2_c s3_c
+# s0_rep
+## s1 updates, s2 updates, s3 updates, s1 commits, s2 EPQ, s2 rolls back, s3 EPQ
+#permutation s1_trig_rep_b_u s1_trig_rep_a_u
+# s1_ins_a s1_ins_b s1_b_rc s2_b_rc s3_b_rc
+# s1_upd_a_data s2_upd_a_data s3_upd_a_data s1_c s2_r s3_c
+# s0_rep
+## s1 updates, s3 updates, s2 upserts, s1 updates, s1 commits, s3 EPQ, s3 deletes, s3 commits, s2 inserts without EPQ recheck
+#permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+# s1_ins_a s1_b_rc s2_b_rc s3_b_rc
+# s1_upd_a_data s3_upd_a_data s2_upsert_a_data s1_upd_a_data s1_c s3_del_a s3_c s2_c
+# s0_rep
+## s1 updates, s3 updates, s2 upserts, s1 updates, s1 commits, s3 EPQ, s3 deletes, s3 rolls back, s2 EPQ
+#permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+# s1_ins_a s1_b_rc s2_b_rc s3_b_rc
+# s1_upd_a_data s3_upd_a_data s2_upsert_a_data s1_upd_a_data s1_c s3_del_a s3_r s2_c
+# s0_rep
+
+### Document that EPQ doesn't "leap" onto a tuple that would match after blocking
+# s1 inserts a, s1 updates b, s2 updates b, s1 deletes b, s1 updates a to b, s1 commits, s2 EPQ finds tuple deleted
+permutation s1_trig_rep_b_i s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_i s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_b s1_b_rc s2_b_rc
+ s1_ins_a s1_upd_b_data s2_upd_b_data s1_del_b s1_upd_a_tob s1_c s2_c
+ s0_rep
+
+### Verify that, with triggers involved, concurrent updates at REPEATABLE
+### READ raise serialization failures instead of performing EPQ
+# s1 updates, s2 updates, s1 commits, serialization failure
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rr s2_b_rr
+ s1_upd_a_data s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 updates, s2 updates, s1 rolls back, s2 succeeds
+permutation s1_trig_rep_b_u s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rr s2_b_rr
+ s1_upd_a_data s2_upd_a_data s1_r s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 commits, serialization failure
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rr s2_b_rr
+ s1_del_a s2_upd_a_data s1_c s2_c
+ s0_rep
+# s1 deletes, s2 updates, s1 rolls back, s2 succeeds
+permutation s1_trig_rep_b_d s1_trig_rep_b_u s1_trig_rep_a_d s1_trig_rep_a_u
+ s1_ins_a s1_ins_b s1_b_rr s2_b_rr
+ s1_del_a s2_upd_a_data s1_r s2_c
+ s0_rep
diff --git a/src/test/isolation/specs/eval-plan-qual.spec b/src/test/isolation/specs/eval-plan-qual.spec
new file mode 100644
index 0000000..735c671
--- /dev/null
+++ b/src/test/isolation/specs/eval-plan-qual.spec
@@ -0,0 +1,378 @@
+# Tests for the EvalPlanQual mechanism
+#
+# EvalPlanQual is used in READ COMMITTED isolation level to attempt to
+# re-execute UPDATE and DELETE operations against rows that were updated
+# by some concurrent transaction.
+
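+# A rough two-session sketch of the mechanism at READ COMMITTED (using the
+# accounts table created below; the balances here are illustrative):
+#
+#   -- session 1:
+#   BEGIN;
+#   UPDATE accounts SET balance = 500 WHERE accountid = 'checking';
+#   -- session 2 (blocks on session 1's row lock):
+#   UPDATE accounts SET balance = balance + 100
+#     WHERE accountid = 'checking' AND balance < 1000;
+#   -- session 1:
+#   COMMIT;
+#   -- session 2 is released: EvalPlanQual re-fetches the newest version of
+#   -- the row, re-evaluates the qual against it, and applies the update only
+#   -- if the qual still passes (here it does: 500 < 1000).
+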
+setup
+{
+ CREATE TABLE accounts (accountid text PRIMARY KEY, balance numeric not null,
+ balance2 numeric GENERATED ALWAYS AS (balance * 2) STORED);
+ INSERT INTO accounts VALUES ('checking', 600), ('savings', 600);
+
+ CREATE FUNCTION update_checking(int) RETURNS bool LANGUAGE sql AS $$
+ UPDATE accounts SET balance = balance + 1 WHERE accountid = 'checking'; SELECT true;$$;
+
+ CREATE TABLE accounts_ext (accountid text PRIMARY KEY, balance numeric not null, other text);
+ INSERT INTO accounts_ext VALUES ('checking', 600, 'other'), ('savings', 700, null);
+ ALTER TABLE accounts_ext ADD COLUMN newcol int DEFAULT 42;
+ ALTER TABLE accounts_ext ADD COLUMN newcol2 text DEFAULT NULL;
+
+ CREATE TABLE p (a int, b int, c int);
+ CREATE TABLE c1 () INHERITS (p);
+ CREATE TABLE c2 () INHERITS (p);
+ CREATE TABLE c3 () INHERITS (p);
+ INSERT INTO c1 SELECT 0, a / 3, a % 3 FROM generate_series(0, 9) a;
+ INSERT INTO c2 SELECT 1, a / 3, a % 3 FROM generate_series(0, 9) a;
+ INSERT INTO c3 SELECT 2, a / 3, a % 3 FROM generate_series(0, 9) a;
+
+ CREATE TABLE table_a (id integer, value text);
+ CREATE TABLE table_b (id integer, value text);
+ INSERT INTO table_a VALUES (1, 'tableAValue');
+ INSERT INTO table_b VALUES (1, 'tableBValue');
+
+ CREATE TABLE jointest AS SELECT generate_series(1,10) AS id, 0 AS data;
+ CREATE INDEX ON jointest(id);
+
+ CREATE TABLE parttbl (a int, b int, c int,
+ d int GENERATED ALWAYS AS (a + b) STORED) PARTITION BY LIST (a);
+ CREATE TABLE parttbl1 PARTITION OF parttbl FOR VALUES IN (1);
+ CREATE TABLE parttbl2 PARTITION OF parttbl
+ (d WITH OPTIONS GENERATED ALWAYS AS (a + b + 1000) STORED)
+ FOR VALUES IN (2);
+ INSERT INTO parttbl VALUES (1, 1, 1), (2, 2, 2);
+
+ CREATE TABLE another_parttbl (a int, b int, c int) PARTITION BY LIST (a);
+ CREATE TABLE another_parttbl1 PARTITION OF another_parttbl FOR VALUES IN (1);
+ CREATE TABLE another_parttbl2 PARTITION OF another_parttbl FOR VALUES IN (2);
+ INSERT INTO another_parttbl VALUES (1, 1, 1);
+
+ CREATE FUNCTION noisy_oper(p_comment text, p_a anynonarray, p_op text, p_b anynonarray)
+ RETURNS bool LANGUAGE plpgsql AS $$
+ DECLARE
+ r bool;
+ BEGIN
+ EXECUTE format('SELECT $1 %s $2', p_op) INTO r USING p_a, p_b;
+ RAISE NOTICE '%: % % % % %: %', p_comment, pg_typeof(p_a), p_a, p_op, pg_typeof(p_b), p_b, r;
+ RETURN r;
+ END;$$;
+}
+
+teardown
+{
+ DROP TABLE accounts;
+ DROP FUNCTION update_checking(int);
+ DROP TABLE accounts_ext;
+ DROP TABLE p CASCADE;
+ DROP TABLE table_a, table_b, jointest;
+ DROP TABLE parttbl;
+ DROP TABLE another_parttbl;
+ DROP FUNCTION noisy_oper(text, anynonarray, text, anynonarray)
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+# wx1 then wx2 checks the basic case of re-fetching up-to-date values
+step wx1 { UPDATE accounts SET balance = balance - 200 WHERE accountid = 'checking' RETURNING balance; }
+# wy1 then wy2 checks the case where quals pass then fail
+step wy1 { UPDATE accounts SET balance = balance + 500 WHERE accountid = 'checking' RETURNING balance; }
+
+step wxext1 { UPDATE accounts_ext SET balance = balance - 200 WHERE accountid = 'checking' RETURNING balance; }
+step tocds1 { UPDATE accounts SET accountid = 'cds' WHERE accountid = 'checking'; }
+step tocdsext1 { UPDATE accounts_ext SET accountid = 'cds' WHERE accountid = 'checking'; }
+
+# d1 then wx1 checks that update can deal with the updated row vanishing
+# wx2 then d1 checks that the delete affects the updated row
+# wx2, wx2 then d1 checks that the delete checks the quals correctly (balance too high)
+# wx2, d2, then d1 checks that delete handles a vanishing row correctly
+step d1 { DELETE FROM accounts WHERE accountid = 'checking' AND balance < 1500 RETURNING balance; }
+
+# upsert tests are to check writable-CTE cases
+step upsert1 {
+ WITH upsert AS
+ (UPDATE accounts SET balance = balance + 500
+ WHERE accountid = 'savings'
+ RETURNING accountid)
+ INSERT INTO accounts SELECT 'savings', 500
+ WHERE NOT EXISTS (SELECT 1 FROM upsert);
+}
+
+# tests with table p check inheritance cases:
+# readp1/writep1/readp2 tests a bug where nodeLockRows did the wrong thing
+# when the first updated tuple was in a non-first child table.
+# writep2/returningp1 tests a memory allocation issue
+# writep3a/writep3b tests updates touching more than one table
+# writep4a/writep4b tests a case where matches in another table confused EPQ
+# writep4a/deletep4 tests the same case in the DELETE path
+
+step readp { SELECT tableoid::regclass, ctid, * FROM p; }
+step readp1 { SELECT tableoid::regclass, ctid, * FROM p WHERE b IN (0, 1) AND c = 0 FOR UPDATE; }
+step writep1 { UPDATE p SET b = -1 WHERE a = 1 AND b = 1 AND c = 0; }
+step writep2 { UPDATE p SET b = -b WHERE a = 1 AND c = 0; }
+step writep3a { UPDATE p SET b = -b WHERE c = 0; }
+step writep4a { UPDATE p SET c = 4 WHERE c = 0; }
+step c1 { COMMIT; }
+step r1 { ROLLBACK; }
+
+# these tests are meant to exercise EvalPlanQualFetchRowMark,
+# ie, handling non-locked tables in an EvalPlanQual recheck
+
+step partiallock {
+ SELECT * FROM accounts a1, accounts a2
+ WHERE a1.accountid = a2.accountid
+ FOR UPDATE OF a1;
+}
+step lockwithvalues {
+ -- Reference rowmark column that differs in type from targetlist at some attno.
+ -- See CAHU7rYZo_C4ULsAx_LAj8az9zqgrD8WDd4hTegDTMM1LMqrBsg@mail.gmail.com
+ SELECT a1.*, v.id FROM accounts a1, (values('checking'::text, 'nan'::text),('savings', 'nan')) v(id, notnumeric)
+ WHERE a1.accountid = v.id AND v.notnumeric != 'einszwei'
+ FOR UPDATE OF a1;
+}
+step partiallock_ext {
+ SELECT * FROM accounts_ext a1, accounts_ext a2
+ WHERE a1.accountid = a2.accountid
+ FOR UPDATE OF a1;
+}
+
+# these tests exercise EvalPlanQual with a SubLink sub-select (which should be
+# unaffected by any EPQ recheck behavior in the outer query); cf bug #14034
+
+step updateforss {
+ UPDATE table_a SET value = 'newTableAValue' WHERE id = 1;
+ UPDATE table_b SET value = 'newTableBValue' WHERE id = 1;
+}
+
+# these tests exercise EvalPlanQual with conditional InitPlans which
+# have not been executed prior to the EPQ
+
+step updateforcip {
+ UPDATE table_a SET value = NULL WHERE id = 1;
+}
+
+# these tests exercise mark/restore during EPQ recheck, cf bug #15032
+
+step selectjoinforupdate {
+ set local enable_nestloop to 0;
+ set local enable_hashjoin to 0;
+ set local enable_seqscan to 0;
+ explain (costs off)
+ select * from jointest a join jointest b on a.id=b.id for update;
+ select * from jointest a join jointest b on a.id=b.id for update;
+}
+
+# these tests exercise Result plan nodes participating in EPQ
+
+step selectresultforupdate {
+ select * from (select 1 as x) ss1 join (select 7 as y) ss2 on true
+ left join table_a a on a.id = x, jointest jt
+ where jt.id = y;
+ explain (verbose, costs off)
+ select * from (select 1 as x) ss1 join (select 7 as y) ss2 on true
+ left join table_a a on a.id = x, jointest jt
+ where jt.id = y for update of jt, ss1, ss2;
+ select * from (select 1 as x) ss1 join (select 7 as y) ss2 on true
+ left join table_a a on a.id = x, jointest jt
+ where jt.id = y for update of jt, ss1, ss2;
+}
+
+# test for EPQ on a partitioned result table
+
+step simplepartupdate {
+ update parttbl set b = b + 10;
+}
+
+# test scenarios where update may cause row movement
+
+step simplepartupdate_route1to2 {
+ update parttbl set a = 2 where c = 1 returning *;
+}
+
+step simplepartupdate_noroute {
+ update parttbl set b = 2 where c = 1 returning *;
+}
+
+
+session s2
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step wx2 { UPDATE accounts SET balance = balance + 450 WHERE accountid = 'checking' RETURNING balance; }
+step wy2 { UPDATE accounts SET balance = balance + 1000 WHERE accountid = 'checking' AND balance < 1000 RETURNING balance; }
+step d2 { DELETE FROM accounts WHERE accountid = 'checking'; }
+
+step upsert2 {
+ WITH upsert AS
+ (UPDATE accounts SET balance = balance + 1234
+ WHERE accountid = 'savings'
+ RETURNING accountid)
+ INSERT INTO accounts SELECT 'savings', 1234
+ WHERE NOT EXISTS (SELECT 1 FROM upsert);
+}
+step wx2_ext { UPDATE accounts_ext SET balance = balance + 450; }
+step readp2 { SELECT tableoid::regclass, ctid, * FROM p WHERE b IN (0, 1) AND c = 0 FOR UPDATE; }
+step returningp1 {
+ WITH u AS ( UPDATE p SET b = b WHERE a > 0 RETURNING * )
+ SELECT * FROM u;
+}
+step writep3b { UPDATE p SET b = -b WHERE c = 0; }
+step writep4b { UPDATE p SET b = -4 WHERE c = 0; }
+step deletep4 { DELETE FROM p WHERE c = 0; }
+step readforss {
+ SELECT ta.id AS ta_id, ta.value AS ta_value,
+ (SELECT ROW(tb.id, tb.value)
+ FROM table_b tb WHERE ta.id = tb.id) AS tb_row
+ FROM table_a ta
+ WHERE ta.id = 1 FOR UPDATE OF ta;
+}
+step updateforcip2 {
+ UPDATE table_a SET value = COALESCE(value, (SELECT text 'newValue')) WHERE id = 1;
+}
+step updateforcip3 {
+ WITH d(val) AS (SELECT text 'newValue' FROM generate_series(1,1))
+ UPDATE table_a SET value = COALESCE(value, (SELECT val FROM d)) WHERE id = 1;
+}
+step wrtwcte { UPDATE table_a SET value = 'tableAValue2' WHERE id = 1; }
+step wrjt { UPDATE jointest SET data = 42 WHERE id = 7; }
+
+step conditionalpartupdate {
+ update parttbl set c = -c where b < 10;
+}
+
+step complexpartupdate {
+ with u as (update parttbl set b = b + 1 returning parttbl.*)
+ update parttbl p set b = u.b + 100 from u where p.a = u.a;
+}
+
+step complexpartupdate_route_err1 {
+ with u as (update another_parttbl set a = 1 returning another_parttbl.*)
+ update parttbl p set a = u.a from u where p.a = u.a and p.c = 1 returning p.*;
+}
+
+step complexpartupdate_route {
+ with u as (update another_parttbl set a = 1 returning another_parttbl.*)
+ update parttbl p set a = p.b from u where p.a = u.a and p.c = 1 returning p.*;
+}
+
+step complexpartupdate_doesnt_route {
+ with u as (update another_parttbl set a = 1 returning another_parttbl.*)
+ update parttbl p set a = 3 - p.b from u where p.a = u.a and p.c = 1 returning p.*;
+}
+
+# Use writable CTEs to create self-updated rows, which are then
+# updated or deleted. The *fail versions of the tests additionally
+# perform an update, via a function, in a different command, to test
+# behaviour relating to that.
+step updwcte { WITH doup AS (UPDATE accounts SET balance = balance + 1100 WHERE accountid = 'checking' RETURNING *) UPDATE accounts a SET balance = doup.balance + 100 FROM doup RETURNING *; }
+step updwctefail { WITH doup AS (UPDATE accounts SET balance = balance + 1100 WHERE accountid = 'checking' RETURNING *, update_checking(999)) UPDATE accounts a SET balance = doup.balance + 100 FROM doup RETURNING *; }
+step delwcte { WITH doup AS (UPDATE accounts SET balance = balance + 1100 WHERE accountid = 'checking' RETURNING *) DELETE FROM accounts a USING doup RETURNING *; }
+step delwctefail { WITH doup AS (UPDATE accounts SET balance = balance + 1100 WHERE accountid = 'checking' RETURNING *, update_checking(999)) DELETE FROM accounts a USING doup RETURNING *; }
+
+# Check that nested EPQ works correctly
+step wnested2 {
+ UPDATE accounts SET balance = balance - 1200
+ WHERE noisy_oper('upid', accountid, '=', 'checking')
+ AND noisy_oper('up', balance, '>', 200.0)
+ AND EXISTS (
+ SELECT accountid
+ FROM accounts_ext ae
+ WHERE noisy_oper('lock_id', ae.accountid, '=', accounts.accountid)
+ AND noisy_oper('lock_bal', ae.balance, '>', 200.0)
+ FOR UPDATE
+ );
+}
+
+step c2 { COMMIT; }
+step r2 { ROLLBACK; }
+
+session s3
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step read { SELECT * FROM accounts ORDER BY accountid; }
+step read_ext { SELECT * FROM accounts_ext ORDER BY accountid; }
+step read_a { SELECT * FROM table_a ORDER BY id; }
+step read_part { SELECT * FROM parttbl ORDER BY a, c; }
+
+# this test exercises EvalPlanQual with a CTE, cf bug #14328
+step readwcte {
+ WITH
+ cte1 AS (
+ SELECT id FROM table_b WHERE value = 'tableBValue'
+ ),
+ cte2 AS (
+ SELECT * FROM table_a
+ WHERE id = (SELECT id FROM cte1)
+ FOR UPDATE
+ )
+ SELECT * FROM cte2;
+}
+
+# this test exercises a different CTE misbehavior, cf bug #14870
+step multireadwcte {
+ WITH updated AS (
+ UPDATE table_a SET value = 'tableAValue3' WHERE id = 1 RETURNING id
+ )
+ SELECT (SELECT id FROM updated) AS subid, * FROM updated;
+}
+
+teardown { COMMIT; }
+
+# test that normal update follows update chains, and reverifies quals
+permutation wx1 wx2 c1 c2 read
+permutation wy1 wy2 c1 c2 read
+permutation wx1 wx2 r1 c2 read
+permutation wy1 wy2 r1 c2 read
+
+# test that deletes follow chains, and if necessary reverifies quals
+permutation wx1 d1 wx2 c1 c2 read
+permutation wx2 d1 c2 c1 read
+permutation wx2 wx2 d1 c2 c1 read
+permutation wx2 d2 d1 c2 c1 read
+permutation wx1 d1 wx2 r1 c2 read
+permutation wx2 d1 r2 c1 read
+permutation wx2 wx2 d1 r2 c1 read
+permutation wx2 d2 d1 r2 c1 read
+permutation d1 wx2 c1 c2 read
+permutation d1 wx2 r1 c2 read
+
+# Check that nested EPQ works correctly
+permutation wnested2 c1 c2 read
+permutation wx1 wxext1 wnested2 c1 c2 read
+permutation wx1 wx1 wxext1 wnested2 c1 c2 read
+permutation wx1 wx1 wxext1 wxext1 wnested2 c1 c2 read
+permutation wx1 wxext1 wxext1 wnested2 c1 c2 read
+permutation wx1 tocds1 wnested2 c1 c2 read
+permutation wx1 tocdsext1 wnested2 c1 c2 read
+
+# test that an update to a self-modified row is ignored when
+# previously updated by the same cid
+permutation wx1 updwcte c1 c2 read
+# test that an update to a self-modified row throws an error when
+# previously updated by a different cid
+permutation wx1 updwctefail c1 c2 read
+# test that a delete to a self-modified row is ignored when
+# previously updated by the same cid
+permutation wx1 delwcte c1 c2 read
+# test that a delete to a self-modified row throws an error when
+# previously updated by a different cid
+permutation wx1 delwctefail c1 c2 read
+
+permutation upsert1 upsert2 c1 c2 read
+permutation readp1 writep1 readp2 c1 c2
+permutation writep2 returningp1 c1 c2
+permutation writep3a writep3b c1 c2
+permutation writep4a writep4b c1 c2 readp
+permutation writep4a deletep4 c1 c2 readp
+permutation wx2 partiallock c2 c1 read
+permutation wx2 lockwithvalues c2 c1 read
+permutation wx2_ext partiallock_ext c2 c1 read_ext
+permutation updateforss readforss c1 c2
+permutation updateforcip updateforcip2 c1 c2 read_a
+permutation updateforcip updateforcip3 c1 c2 read_a
+permutation wrtwcte readwcte c1 c2
+permutation wrjt selectjoinforupdate c2 c1
+permutation wrjt selectresultforupdate c2 c1
+permutation wrtwcte multireadwcte c1 c2
+
+permutation simplepartupdate conditionalpartupdate c1 c2 read_part
+permutation simplepartupdate complexpartupdate c1 c2 read_part
+permutation simplepartupdate_route1to2 complexpartupdate_route_err1 c1 c2 read_part
+permutation simplepartupdate_noroute complexpartupdate_route c1 c2 read_part
+permutation simplepartupdate_noroute complexpartupdate_doesnt_route c1 c2 read_part
diff --git a/src/test/isolation/specs/fk-contention.spec b/src/test/isolation/specs/fk-contention.spec
new file mode 100644
index 0000000..f11a1d8
--- /dev/null
+++ b/src/test/isolation/specs/fk-contention.spec
@@ -0,0 +1,19 @@
+setup
+{
+ CREATE TABLE foo (a int PRIMARY KEY, b text);
+ CREATE TABLE bar (a int NOT NULL REFERENCES foo);
+ INSERT INTO foo VALUES (42);
+}
+
+teardown
+{
+ DROP TABLE foo, bar;
+}
+
+session s1
+setup { BEGIN; }
+step ins { INSERT INTO bar VALUES (42); }
+step com { COMMIT; }
+
+session s2
+step upd { UPDATE foo SET b = 'Hello World'; }
diff --git a/src/test/isolation/specs/fk-deadlock.spec b/src/test/isolation/specs/fk-deadlock.spec
new file mode 100644
index 0000000..b4970dd
--- /dev/null
+++ b/src/test/isolation/specs/fk-deadlock.spec
@@ -0,0 +1,46 @@
+setup
+{
+ CREATE TABLE parent (
+ parent_key int PRIMARY KEY,
+ aux text NOT NULL
+ );
+
+ CREATE TABLE child (
+ child_key int PRIMARY KEY,
+ parent_key int NOT NULL REFERENCES parent
+ );
+
+ INSERT INTO parent VALUES (1, 'foo');
+}
+
+teardown
+{
+ DROP TABLE parent, child;
+}
+
+session s1
+setup { BEGIN; SET deadlock_timeout = '100ms'; }
+step s1i { INSERT INTO child VALUES (1, 1); }
+step s1u { UPDATE parent SET aux = 'bar'; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; SET deadlock_timeout = '10s'; }
+step s2i { INSERT INTO child VALUES (2, 1); }
+step s2u { UPDATE parent SET aux = 'baz'; }
+step s2c { COMMIT; }
+
+permutation s1i s1u s1c s2i s2u s2c
+permutation s1i s1u s2i s1c s2u s2c
+permutation s1i s1u s2i s2u s1c s2c
+permutation s1i s2i s1u s1c s2u s2c
+permutation s1i s2i s1u s2u s1c s2c
+permutation s1i s2i s2u s1u s2c s1c
+permutation s1i s2i s2u s2c s1u s1c
+permutation s2i s1i s1u s1c s2u s2c
+permutation s2i s1i s1u s2u s1c s2c
+permutation s2i s1i s2u s1u s2c s1c
+permutation s2i s1i s2u s2c s1u s1c
+permutation s2i s2u s1i s1u s2c s1c
+permutation s2i s2u s1i s2c s1u s1c
+permutation s2i s2u s2c s1i s1u s1c
diff --git a/src/test/isolation/specs/fk-deadlock2.spec b/src/test/isolation/specs/fk-deadlock2.spec
new file mode 100644
index 0000000..c8e0e4e
--- /dev/null
+++ b/src/test/isolation/specs/fk-deadlock2.spec
@@ -0,0 +1,48 @@
+setup
+{
+ CREATE TABLE A (
+ AID integer not null,
+ Col1 integer,
+ PRIMARY KEY (AID)
+ );
+
+ CREATE TABLE B (
+ BID integer not null,
+ AID integer not null,
+ Col2 integer,
+ PRIMARY KEY (BID),
+ FOREIGN KEY (AID) REFERENCES A(AID)
+ );
+
+ INSERT INTO A (AID) VALUES (1);
+ INSERT INTO B (BID,AID) VALUES (2,1);
+}
+
+teardown
+{
+ DROP TABLE a, b;
+}
+
+session s1
+setup { BEGIN; SET deadlock_timeout = '100ms'; }
+step s1u1 { UPDATE A SET Col1 = 1 WHERE AID = 1; }
+step s1u2 { UPDATE B SET Col2 = 1 WHERE BID = 2; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; SET deadlock_timeout = '10s'; }
+step s2u1 { UPDATE B SET Col2 = 1 WHERE BID = 2; }
+step s2u2 { UPDATE B SET Col2 = 1 WHERE BID = 2; }
+step s2c { COMMIT; }
+
+permutation s1u1 s1u2 s1c s2u1 s2u2 s2c
+permutation s1u1 s1u2 s2u1 s1c s2u2 s2c
+permutation s1u1 s2u1 s1u2 s2u2 s2c s1c
+permutation s1u1 s2u1 s2u2 s1u2 s2c s1c
+permutation s1u1 s2u1 s2u2 s2c s1u2 s1c
+permutation s2u1 s1u1 s1u2 s2u2 s2c s1c
+permutation s2u1 s1u1 s2u2 s1u2 s2c s1c
+permutation s2u1 s1u1 s2u2 s2c s1u2 s1c
+permutation s2u1 s2u2 s1u1 s1u2 s2c s1c
+permutation s2u1 s2u2 s1u1 s2c s1u2 s1c
+permutation s2u1 s2u2 s2c s1u1 s1u2 s1c
diff --git a/src/test/isolation/specs/fk-partitioned-1.spec b/src/test/isolation/specs/fk-partitioned-1.spec
new file mode 100644
index 0000000..f71ee5c
--- /dev/null
+++ b/src/test/isolation/specs/fk-partitioned-1.spec
@@ -0,0 +1,45 @@
+# Verify that cloning a foreign key constraint to a partition ensures
+# that referenced values exist, even if they're being concurrently
+# deleted.
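+#
+# Conceptually, the ATTACH has to prove something along these lines (a
+# sketch, not the exact internal validation query):
+#
+#   SELECT fk.a FROM pfk1 fk
+#   WHERE NOT EXISTS (SELECT 1 FROM ppk pk WHERE pk.a = fk.a);
+#   -- any row returned would mean the cloned FK constraint cannot be
+#   -- validated; the permutations below verify that a row being deleted
+#   -- concurrently cannot slip past this check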
+setup {
+ drop table if exists ppk, pfk, pfk1;
+ create table ppk (a int primary key) partition by list (a);
+ create table ppk1 partition of ppk for values in (1);
+ insert into ppk values (1);
+ create table pfk (a int references ppk) partition by list (a);
+ create table pfk1 (a int not null);
+ insert into pfk1 values (1);
+}
+
+session s1
+step s1b { begin; }
+step s1d { delete from ppk1 where a = 1; }
+step s1c { commit; }
+
+session s2
+step s2b { begin; }
+step s2a { alter table pfk attach partition pfk1 for values in (1); }
+step s2c { commit; }
+
+teardown { drop table ppk, pfk, pfk1; }
+
+permutation s1b s1d s1c s2b s2a s2c
+permutation s1b s1d s2b s1c s2a s2c
+permutation s1b s1d s2b s2a s1c s2c
+#permutation s1b s1d s2b s2a s2c s1c
+permutation s1b s2b s1d s1c s2a s2c
+permutation s1b s2b s1d s2a s1c s2c
+#permutation s1b s2b s1d s2a s2c s1c
+#permutation s1b s2b s2a s1d s1c s2c
+permutation s1b s2b s2a s1d s2c s1c
+permutation s1b s2b s2a s2c s1d s1c
+permutation s2b s1b s1d s1c s2a s2c
+permutation s2b s1b s1d s2a s1c s2c
+#permutation s2b s1b s1d s2a s2c s1c
+#permutation s2b s1b s2a s1d s1c s2c
+permutation s2b s1b s2a s1d s2c s1c
+permutation s2b s1b s2a s2c s1d s1c
+#permutation s2b s2a s1b s1d s1c s2c
+permutation s2b s2a s1b s1d s2c s1c
+permutation s2b s2a s1b s2c s1d s1c
+permutation s2b s2a s2c s1b s1d s1c
diff --git a/src/test/isolation/specs/fk-partitioned-2.spec b/src/test/isolation/specs/fk-partitioned-2.spec
new file mode 100644
index 0000000..209ad59
--- /dev/null
+++ b/src/test/isolation/specs/fk-partitioned-2.spec
@@ -0,0 +1,29 @@
+# Make sure that FKs referencing partitioned tables actually work.
+setup {
+ drop table if exists ppk, pfk, pfk1;
+ create table ppk (a int primary key) partition by list (a);
+ create table ppk1 partition of ppk for values in (1);
+ insert into ppk values (1);
+ create table pfk (a int references ppk) partition by list (a);
+ create table pfk1 partition of pfk for values in (1);
+}
+
+session s1
+step s1b { begin; }
+step s1d { delete from ppk where a = 1; }
+step s1c { commit; }
+
+session s2
+step s2b { begin; }
+step s2bs { begin isolation level serializable; select 1; }
+step s2i { insert into pfk values (1); }
+step s2c { commit; }
+
+teardown { drop table ppk, pfk, pfk1; }
+
+permutation s1b s1d s2b s2i s1c s2c
+permutation s1b s1d s2bs s2i s1c s2c
+permutation s1b s2b s1d s2i s1c s2c
+permutation s1b s2bs s1d s2i s1c s2c
+permutation s1b s2b s2i s1d s2c s1c
+permutation s1b s2bs s2i s1d s2c s1c
diff --git a/src/test/isolation/specs/fk-snapshot.spec b/src/test/isolation/specs/fk-snapshot.spec
new file mode 100644
index 0000000..9fad57e
--- /dev/null
+++ b/src/test/isolation/specs/fk-snapshot.spec
@@ -0,0 +1,76 @@
+setup
+{
+ CREATE TABLE pk_noparted (
+ a int PRIMARY KEY
+ );
+
+ CREATE TABLE fk_parted_pk (
+ a int PRIMARY KEY REFERENCES pk_noparted ON DELETE CASCADE
+ ) PARTITION BY LIST (a);
+ CREATE TABLE fk_parted_pk_1 PARTITION OF fk_parted_pk FOR VALUES IN (1);
+ CREATE TABLE fk_parted_pk_2 PARTITION OF fk_parted_pk FOR VALUES IN (2);
+
+ CREATE TABLE fk_noparted (
+ a int REFERENCES fk_parted_pk ON DELETE NO ACTION INITIALLY DEFERRED
+ );
+
+ CREATE TABLE fk_noparted_sn (
+ a int REFERENCES pk_noparted ON DELETE SET NULL
+ );
+
+ INSERT INTO pk_noparted VALUES (1);
+ INSERT INTO fk_parted_pk VALUES (1);
+ INSERT INTO fk_noparted VALUES (1);
+}
+
+teardown
+{
+ DROP TABLE pk_noparted, fk_parted_pk, fk_noparted, fk_noparted_sn;
+}
+
+session s1
+step s1brr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s1brc { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s1ifp2 { INSERT INTO fk_parted_pk VALUES (2); }
+step s1ifp1 { INSERT INTO fk_parted_pk VALUES (1); }
+step s1ifn2 { INSERT INTO fk_noparted_sn VALUES (2); }
+step s1dfp { DELETE FROM fk_parted_pk WHERE a = 1; }
+step s1c { COMMIT; }
+step s1sfp { SELECT * FROM fk_parted_pk; }
+step s1sp { SELECT * FROM pk_noparted; }
+step s1sfn { SELECT * FROM fk_noparted; }
+
+session s2
+step s2brr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s2brc { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s2ip2 { INSERT INTO pk_noparted VALUES (2); }
+step s2dp2 { DELETE FROM pk_noparted WHERE a = 2; }
+step s2ifn2 { INSERT INTO fk_noparted VALUES (2); }
+step s2c { COMMIT; }
+step s2sfp { SELECT * FROM fk_parted_pk; }
+step s2sfn { SELECT * FROM fk_noparted; }
+
+# inserting into referencing tables in transaction-snapshot mode
+# PK table is non-partitioned
+permutation s1brr s2brc s2ip2 s1sp s2c s1sp s1ifp2 s1c s1sfp
+# PK table is partitioned: buggy, because s2's REPEATABLE READ transaction
+# can see a row not visible to its snapshot: the latest snapshot, taken so
+# that partition lookup works correctly, also ends up getting used by the
+# PK index scan
+permutation s2ip2 s2brr s1brc s1ifp2 s2sfp s1c s2sfp s2ifn2 s2c s2sfn
+
+# inserting into referencing tables in up-to-date snapshot mode
+permutation s1brc s2brc s2ip2 s1sp s2c s1sp s1ifp2 s2brc s2sfp s1c s1sfp s2ifn2 s2c s2sfn
+
+# deleting a referenced row and then inserting again in the same transaction; works
+# the same no matter the snapshot mode
+permutation s1brr s1dfp s1ifp1 s1c s1sfn
+permutation s1brc s1dfp s1ifp1 s1c s1sfn
+
+# trying to delete a row through ON DELETE CASCADE, whilst that row is being
+# inserted in a concurrent transaction
+permutation s2ip2 s1brr s1ifp2 s2brr s2dp2 s1c s2c
+
+# trying to update a row through ON DELETE SET NULL, whilst that row is being
+# inserted in a concurrent transaction
+permutation s2ip2 s1brr s1ifn2 s2brr s2dp2 s1c s2c
diff --git a/src/test/isolation/specs/freeze-the-dead.spec b/src/test/isolation/specs/freeze-the-dead.spec
new file mode 100644
index 0000000..6c34904
--- /dev/null
+++ b/src/test/isolation/specs/freeze-the-dead.spec
@@ -0,0 +1,56 @@
+# Test for interactions of tuple freezing with dead and recently-dead
+# tuples that use multixacts via FOR KEY SHARE.
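+#
+# For orientation, a multixact xmax of the kind being frozen here can be
+# produced with plain SQL along these lines (a sketch; the xmax value shown
+# is a MultiXactId, which isn't distinguishable from a plain xid without
+# e.g. pageinspect):
+#
+#   -- session 1:
+#   BEGIN;
+#   UPDATE tab_freeze SET x = x + 1 WHERE id = 3;
+#   -- session 2 (FOR KEY SHARE doesn't conflict with the non-key update,
+#   -- so xmax of the old tuple version becomes a multixact holding both
+#   -- the updater and the locker):
+#   BEGIN;
+#   SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE;
+#   SELECT xmin, xmax, * FROM tab_freeze WHERE id = 3;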
+setup
+{
+ CREATE TABLE tab_freeze (
+ id int PRIMARY KEY,
+ name char(3),
+ x int);
+ INSERT INTO tab_freeze VALUES (1, '111', 0);
+ INSERT INTO tab_freeze VALUES (3, '333', 0);
+}
+
+teardown
+{
+ DROP TABLE tab_freeze;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_update { UPDATE tab_freeze SET x = x + 1 WHERE id = 3; }
+step s1_commit { COMMIT; }
+step s1_selectone {
+ BEGIN;
+ SET LOCAL enable_seqscan = false;
+ SET LOCAL enable_bitmapscan = false;
+ SELECT * FROM tab_freeze WHERE id = 3;
+ COMMIT;
+}
+step s1_selectall { SELECT * FROM tab_freeze ORDER BY name, id; }
+
+session s2
+step s2_begin { BEGIN; }
+step s2_key_share { SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE; }
+step s2_commit { COMMIT; }
+step s2_vacuum { VACUUM FREEZE tab_freeze; }
+
+session s3
+step s3_begin { BEGIN; }
+step s3_key_share { SELECT id FROM tab_freeze WHERE id = 3 FOR KEY SHARE; }
+step s3_commit { COMMIT; }
+
+# This permutation verifies that a previous bug
+# https://postgr.es/m/E5711E62-8FDF-4DCA-A888-C200BF6B5742@amazon.com
+# https://postgr.es/m/20171102112019.33wb7g5wp4zpjelu@alap3.anarazel.de
+# is not reintroduced. We used to make wrong pruning / freezing
+# decisions for multixacts, which could lead to a) broken hot chains b)
+# dead rows being revived.
+permutation s1_begin s2_begin s3_begin # start transactions
+ s1_update s2_key_share s3_key_share # have xmax be a multi with an updater, updater being oldest xid
+ s1_update # create additional row version that has multis
+ s1_commit s2_commit # commit both updater and share locker
+ s2_vacuum # due to bug in freezing logic, we used to *not* prune updated row, and then froze it
+ s1_selectone # if hot chain is broken, the row can't be found via index scan
+ s3_commit # commit remaining open xact
+ s2_vacuum # pruning / freezing in broken hot chains would unset xmax, reviving rows
+ s1_selectall # show borkedness
diff --git a/src/test/isolation/specs/horizons.spec b/src/test/isolation/specs/horizons.spec
new file mode 100644
index 0000000..d5239ff
--- /dev/null
+++ b/src/test/isolation/specs/horizons.spec
@@ -0,0 +1,169 @@
+# Test that pruning and vacuuming pay attention to concurrent sessions
+# in the right way. For normal relations that means that rows cannot
+# be pruned away if there's an older snapshot; in contrast, rows in
+# temporary tables should nearly always be prunable.
+#
+# NB: Think hard before adding a test showing that rows in permanent
+# tables get pruned - it's quite likely that it'd be racy, e.g. due to
+# an autovacuum worker holding a snapshot.
+
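+# The observation technique used below, in standalone form (a sketch; it
+# relies on index-only scans reporting "Heap Fetches" in EXPLAIN ANALYZE):
+#
+#   SET enable_seqscan = off;
+#   SET enable_indexscan = off;
+#   SET enable_bitmapscan = off;
+#   EXPLAIN (ANALYZE, BUFFERS) SELECT * FROM horizons_tst ORDER BY data;
+#   -- once deleted rows have been pruned and their index entries killed,
+#   -- repeating the query shows the Heap Fetches count dropping, which is
+#   -- how the permutations below tell whether pruning was possible
+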
+setup {
+ CREATE OR REPLACE FUNCTION explain_json(p_query text)
+ RETURNS json
+ LANGUAGE plpgsql AS $$
+ DECLARE
+ v_ret json;
+ BEGIN
+ EXECUTE p_query INTO STRICT v_ret;
+ RETURN v_ret;
+ END;$$;
+}
+
+teardown {
+ DROP FUNCTION explain_json(text);
+}
+
+session lifeline
+
+# Start a transaction, force a snapshot to be held
+step ll_start
+{
+ BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ;
+ SELECT 1;
+}
+
+step ll_commit { COMMIT; }
+
+
+session pruner
+
+setup
+{
+ SET enable_seqscan = false;
+ SET enable_indexscan = false;
+ SET enable_bitmapscan = false;
+}
+
+step pruner_create_temp
+{
+ CREATE TEMPORARY TABLE horizons_tst (data int unique) WITH (autovacuum_enabled = off);
+ INSERT INTO horizons_tst(data) VALUES(1),(2);
+}
+
+step pruner_create_perm
+{
+ CREATE TABLE horizons_tst (data int unique) WITH (autovacuum_enabled = off);
+ INSERT INTO horizons_tst(data) VALUES(1),(2);
+}
+
+# Temp tables cannot be dropped in the teardown, so just always do so
+# as part of the permutation
+step pruner_drop
+{
+ DROP TABLE horizons_tst;
+}
+
+step pruner_delete
+{
+ DELETE FROM horizons_tst;
+}
+
+step pruner_begin { BEGIN; }
+step pruner_commit { COMMIT; }
+
+step pruner_vacuum
+{
+ VACUUM horizons_tst;
+}
+
+# Show the heap fetches of an ordered index-only-scan (other plans
+# have been forbidden above) - that tells us how many non-killed leaf
+# entries there are.
+step pruner_query
+{
+ SELECT explain_json($$
+ EXPLAIN (FORMAT json, BUFFERS, ANALYZE)
+ SELECT * FROM horizons_tst ORDER BY data;$$)->0->'Plan'->'Heap Fetches';
+}
+
+# Verify that the query plan still is an IOS
+step pruner_query_plan
+{
+ EXPLAIN (COSTS OFF) SELECT * FROM horizons_tst ORDER BY data;
+}
+
+
+# Show that with a permanent relation deleted rows cannot be pruned
+# away if there's a concurrent session still seeing the rows.
+permutation
+ pruner_create_perm
+ ll_start
+ pruner_query_plan
+ # Run a query that could do pruning twice; the first one has a chance
+ # to prune, and the second would not perform heap fetches if the first
+ # one did.
+ pruner_query
+ pruner_query
+ pruner_delete
+ pruner_query
+ pruner_query
+ ll_commit
+ pruner_drop
+
+# Show that with a temporary relation deleted rows can be pruned away,
+# even if there's a concurrent session with a snapshot from before the
+# deletion. That's safe because the session with the older snapshot
+# cannot access the temporary table.
+permutation
+ pruner_create_temp
+ ll_start
+ pruner_query_plan
+ pruner_query
+ pruner_query
+ pruner_delete
+ pruner_query
+ pruner_query
+ ll_commit
+ pruner_drop
+
+# Verify that pruning in temporary relations doesn't remove rows still
+# visible in the current session
+permutation
+ pruner_create_temp
+ ll_start
+ pruner_query
+ pruner_query
+ pruner_begin
+ pruner_delete
+ pruner_query
+ pruner_query
+ ll_commit
+ pruner_commit
+ pruner_drop
+
+# Show that vacuum cannot remove deleted rows still visible to another
+# session's snapshot, when accessing a permanent table.
+permutation
+ pruner_create_perm
+ ll_start
+ pruner_query
+ pruner_query
+ pruner_delete
+ pruner_vacuum
+ pruner_query
+ pruner_query
+ ll_commit
+ pruner_drop
+
+# Show that vacuum can remove deleted rows still visible to another
+# session's snapshot, when accessing a temporary table.
+permutation
+ pruner_create_temp
+ ll_start
+ pruner_query
+ pruner_query
+ pruner_delete
+ pruner_vacuum
+ pruner_query
+ pruner_query
+ ll_commit
+ pruner_drop
diff --git a/src/test/isolation/specs/index-only-scan.spec b/src/test/isolation/specs/index-only-scan.spec
new file mode 100644
index 0000000..4e4171c
--- /dev/null
+++ b/src/test/isolation/specs/index-only-scan.spec
@@ -0,0 +1,46 @@
+# index-only scan test
+#
+# This test tries to expose problems with the interaction between index-only
+# scans and SSI.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
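+# In plain SQL, the dangerous structure is roughly the following (both
+# transactions SERIALIZABLE; whichever commits second is expected to abort
+# with SQLSTATE 40001, a serialization failure):
+#
+#   -- session 1:
+#   BEGIN ISOLATION LEVEL SERIALIZABLE;
+#   DELETE FROM taby WHERE id = (SELECT min(id) FROM tabx);
+#   -- session 2:
+#   BEGIN ISOLATION LEVEL SERIALIZABLE;
+#   DELETE FROM tabx WHERE id = (SELECT min(id) FROM taby);
+#   -- both commit: each transaction read a table the other wrote, a cycle
+#   -- that SSI must detect even when the min(id) lookups are satisfied by
+#   -- index-only scans
+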
+setup
+{
+ CREATE TABLE tabx (id int NOT NULL);
+ INSERT INTO tabx SELECT generate_series(1,10000);
+ ALTER TABLE tabx ADD PRIMARY KEY (id);
+ CREATE TABLE taby (id int NOT NULL);
+ INSERT INTO taby SELECT generate_series(1,10000);
+ ALTER TABLE taby ADD PRIMARY KEY (id);
+}
+setup { VACUUM FREEZE ANALYZE tabx; }
+setup { VACUUM FREEZE ANALYZE taby; }
+
+teardown
+{
+ DROP TABLE tabx;
+ DROP TABLE taby;
+}
+
+session s1
+setup
+{
+ BEGIN ISOLATION LEVEL SERIALIZABLE;
+ SET LOCAL seq_page_cost = 0.1;
+ SET LOCAL random_page_cost = 0.1;
+ SET LOCAL cpu_tuple_cost = 0.03;
+}
+step rxwy1 { DELETE FROM taby WHERE id = (SELECT min(id) FROM tabx); }
+step c1 { COMMIT; }
+
+session s2
+setup
+{
+ BEGIN ISOLATION LEVEL SERIALIZABLE;
+ SET LOCAL seq_page_cost = 0.1;
+ SET LOCAL random_page_cost = 0.1;
+ SET LOCAL cpu_tuple_cost = 0.03;
+}
+step rywx2 { DELETE FROM tabx WHERE id = (SELECT min(id) FROM taby); }
+step c2 { COMMIT; }
diff --git a/src/test/isolation/specs/inherit-temp.spec b/src/test/isolation/specs/inherit-temp.spec
new file mode 100644
index 0000000..644f919
--- /dev/null
+++ b/src/test/isolation/specs/inherit-temp.spec
@@ -0,0 +1,78 @@
+# Tests for inheritance trees with temporary relations
+#
+# Inheritance trees are allowed to mix relations with different persistence
+# as long as a persistent child relation does not try to inherit from a
+# temporary parent. This checks several scenarios with SELECT, INSERT, UPDATE,
+# DELETE and TRUNCATE. Any temporary relation inheriting from the same
+# persistent parent should be isolated and handled only in its own session.
+
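+# For reference, the persistence rule itself can be demonstrated with plain
+# SQL (a sketch; the table names here are invented for the example and the
+# exact error wording may differ):
+#
+#   CREATE TEMPORARY TABLE example_temp_parent (a int);
+#   CREATE TABLE example_perm_child () INHERITS (example_temp_parent);
+#   -- ERROR: cannot inherit from temporary relation "example_temp_parent"
+#   CREATE TEMPORARY TABLE example_temp_child () INHERITS (example_temp_parent);
+#   -- allowed: a temporary child of a temporary parent in the same session
+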
+setup
+{
+ CREATE TABLE inh_parent (a int);
+}
+
+teardown
+{
+ DROP TABLE inh_parent;
+}
+
+# Session 1 executes actions which act directly on both the parent and
+# its child. Abbreviation "c" is used for queries working on the child
+# and "p" on the parent.
+session s1
+setup
+{
+ CREATE TEMPORARY TABLE inh_temp_child_s1 () INHERITS (inh_parent);
+}
+step s1_begin { BEGIN; }
+step s1_truncate_p { TRUNCATE inh_parent; }
+step s1_select_p { SELECT a FROM inh_parent; }
+step s1_select_c { SELECT a FROM inh_temp_child_s1; }
+step s1_insert_p { INSERT INTO inh_parent VALUES (1), (2); }
+step s1_insert_c { INSERT INTO inh_temp_child_s1 VALUES (3), (4); }
+step s1_update_p { UPDATE inh_parent SET a = 11 WHERE a = 1; }
+step s1_update_c { UPDATE inh_parent SET a = 13 WHERE a IN (3, 5); }
+step s1_delete_p { DELETE FROM inh_parent WHERE a = 2; }
+step s1_delete_c { DELETE FROM inh_parent WHERE a IN (4, 6); }
+step s1_commit { COMMIT; }
+teardown
+{
+ DROP TABLE inh_temp_child_s1;
+}
+
+# Session 2 executes actions on the parent which act only on its own temporary child.
+session s2
+setup
+{
+ CREATE TEMPORARY TABLE inh_temp_child_s2 () INHERITS (inh_parent);
+}
+step s2_truncate_p { TRUNCATE inh_parent; }
+step s2_select_p { SELECT a FROM inh_parent; }
+step s2_select_c { SELECT a FROM inh_temp_child_s2; }
+step s2_insert_c { INSERT INTO inh_temp_child_s2 VALUES (5), (6); }
+step s2_update_c { UPDATE inh_parent SET a = 15 WHERE a IN (3, 5); }
+step s2_delete_c { DELETE FROM inh_parent WHERE a IN (4, 6); }
+teardown
+{
+ DROP TABLE inh_temp_child_s2;
+}
+
+# Check INSERT behavior across sessions
+permutation s1_insert_p s1_insert_c s2_insert_c s1_select_p s1_select_c s2_select_p s2_select_c
+
+# Check UPDATE behavior across sessions
+permutation s1_insert_p s1_insert_c s2_insert_c s1_update_p s1_update_c s1_select_p s1_select_c s2_select_p s2_select_c
+permutation s1_insert_p s1_insert_c s2_insert_c s2_update_c s1_select_p s1_select_c s2_select_p s2_select_c
+
+# Check DELETE behavior across sessions
+permutation s1_insert_p s1_insert_c s2_insert_c s1_delete_p s1_delete_c s1_select_p s1_select_c s2_select_p s2_select_c
+permutation s1_insert_p s1_insert_c s2_insert_c s2_delete_c s1_select_p s1_select_c s2_select_p s2_select_c
+
+# Check TRUNCATE behavior across sessions
+permutation s1_insert_p s1_insert_c s2_insert_c s1_truncate_p s1_select_p s1_select_c s2_select_p s2_select_c
+permutation s1_insert_p s1_insert_c s2_insert_c s2_truncate_p s1_select_p s1_select_c s2_select_p s2_select_c
+
+# TRUNCATE on a parent tree does not block another session's access to its
+# own temporary child relation, but does block that session when it scans
+# the parent.
+permutation s1_insert_p s1_insert_c s2_insert_c s1_begin s1_truncate_p s2_select_p s1_commit
+permutation s1_insert_p s1_insert_c s2_insert_c s1_begin s1_truncate_p s2_select_c s1_commit
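+
+# For illustration only, not part of the test (hypothetical table names): the
+# visibility rule the permutations rely on can be seen in two plain psql
+# sessions, because a temporary child of another session is never scanned:
+#
+#   -- session A
+#   CREATE TABLE demo_parent (a int);
+#   CREATE TEMPORARY TABLE demo_temp_child () INHERITS (demo_parent);
+#   INSERT INTO demo_temp_child VALUES (3), (4);
+#
+#   -- session B
+#   SELECT a FROM demo_parent;  -- returns no rows: A's temporary child is
+#                               -- not scanned from this session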
diff --git a/src/test/isolation/specs/insert-conflict-do-nothing-2.spec b/src/test/isolation/specs/insert-conflict-do-nothing-2.spec
new file mode 100644
index 0000000..825b7d6
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-nothing-2.spec
@@ -0,0 +1,34 @@
+# INSERT...ON CONFLICT DO NOTHING test with multiple rows
+# in higher isolation levels
+
+setup
+{
+ CREATE TABLE ints (key int, val text, PRIMARY KEY (key) INCLUDE (val));
+}
+
+teardown
+{
+ DROP TABLE ints;
+}
+
+session s1
+step beginrr1 { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step begins1 { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step donothing1 { INSERT INTO ints(key, val) VALUES(1, 'donothing1') ON CONFLICT DO NOTHING; }
+step c1 { COMMIT; }
+step show { SELECT * FROM ints; }
+
+session s2
+step beginrr2 { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step begins2 { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step donothing2 { INSERT INTO ints(key, val) VALUES(1, 'donothing2'), (1, 'donothing3') ON CONFLICT DO NOTHING; }
+step c2 { COMMIT; }
+
+permutation beginrr1 beginrr2 donothing1 c1 donothing2 c2 show
+permutation beginrr1 beginrr2 donothing2 c2 donothing1 c1 show
+permutation beginrr1 beginrr2 donothing1 donothing2 c1 c2 show
+permutation beginrr1 beginrr2 donothing2 donothing1 c2 c1 show
+permutation begins1 begins2 donothing1 c1 donothing2 c2 show
+permutation begins1 begins2 donothing2 c2 donothing1 c1 show
+permutation begins1 begins2 donothing1 donothing2 c1 c2 show
+permutation begins1 begins2 donothing2 donothing1 c2 c1 show
diff --git a/src/test/isolation/specs/insert-conflict-do-nothing.spec b/src/test/isolation/specs/insert-conflict-do-nothing.spec
new file mode 100644
index 0000000..b0e6a37
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-nothing.spec
@@ -0,0 +1,40 @@
+# INSERT...ON CONFLICT DO NOTHING test
+#
+# This test tries to expose problems with the interaction between concurrent
+# sessions during INSERT...ON CONFLICT DO NOTHING.
+#
+# The convention here is that session 1 always ends up inserting, and session 2
+# always ends up doing nothing.
+
+setup
+{
+ CREATE TABLE ints (key int primary key, val text);
+}
+
+teardown
+{
+ DROP TABLE ints;
+}
+
+session s1
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step donothing1 { INSERT INTO ints(key, val) VALUES(1, 'donothing1') ON CONFLICT DO NOTHING; }
+step c1 { COMMIT; }
+step a1 { ABORT; }
+
+session s2
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step donothing2 { INSERT INTO ints(key, val) VALUES(1, 'donothing2') ON CONFLICT DO NOTHING; }
+step select2 { SELECT * FROM ints; }
+step c2 { COMMIT; }
+
+# Regular case where one session block-waits on another to determine if it
+# should proceed with an insert or do nothing.
+permutation donothing1 donothing2 c1 select2 c2
+permutation donothing1 donothing2 a1 select2 c2
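+
+# For illustration only, not part of the test: the single-session semantics
+# that the concurrent permutations build on, using the same table:
+#
+#   INSERT INTO ints(key, val) VALUES(1, 'first') ON CONFLICT DO NOTHING;  -- inserts the row
+#   INSERT INTO ints(key, val) VALUES(1, 'again') ON CONFLICT DO NOTHING;  -- no error, no new row
+#   SELECT * FROM ints;   -- still only (1, 'first')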
diff --git a/src/test/isolation/specs/insert-conflict-do-update-2.spec b/src/test/isolation/specs/insert-conflict-do-update-2.spec
new file mode 100644
index 0000000..8a7c546
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-update-2.spec
@@ -0,0 +1,40 @@
+# INSERT...ON CONFLICT DO UPDATE test
+#
+# This test shows a plausible scenario in which the user might wish to UPDATE a
+# value that is also constrained by the unique index that is the arbiter of
+# whether the alternative path should be taken.
+
+setup
+{
+ CREATE TABLE upsert (key text not null, payload text);
+ CREATE UNIQUE INDEX ON upsert(lower(key)) INCLUDE (payload);
+}
+
+teardown
+{
+ DROP TABLE upsert;
+}
+
+session s1
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step insert1 { INSERT INTO upsert(key, payload) VALUES('FooFoo', 'insert1') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert1'; }
+step c1 { COMMIT; }
+step a1 { ABORT; }
+
+session s2
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step insert2 { INSERT INTO upsert(key, payload) VALUES('FOOFOO', 'insert2') ON CONFLICT (lower(key)) DO UPDATE set key = EXCLUDED.key, payload = upsert.payload || ' updated by insert2'; }
+step select2 { SELECT * FROM upsert; }
+step c2 { COMMIT; }
+
+# One session (session 2) block-waits on another (session 1) to determine if it
+# should proceed with an insert or update. The user can still usefully UPDATE
+# a column constrained by a unique index, as the example illustrates.
+permutation insert1 insert2 c1 select2 c2
+permutation insert1 insert2 a1 select2 c2
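+
+# For illustration only, not part of the test: the arbiter here is the unique
+# expression index on lower(key), so conflicts are detected case-insensitively
+# even in a single session:
+#
+#   INSERT INTO upsert VALUES ('FooFoo', 'a') ON CONFLICT (lower(key)) DO NOTHING;
+#   INSERT INTO upsert VALUES ('FOOFOO', 'b') ON CONFLICT (lower(key)) DO NOTHING;
+#   SELECT count(*) FROM upsert;   -- 1: the second insert saw a conflict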
diff --git a/src/test/isolation/specs/insert-conflict-do-update-3.spec b/src/test/isolation/specs/insert-conflict-do-update-3.spec
new file mode 100644
index 0000000..df67954
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-update-3.spec
@@ -0,0 +1,69 @@
+# INSERT...ON CONFLICT DO UPDATE test
+#
+# Other INSERT...ON CONFLICT DO UPDATE isolation tests illustrate the "MVCC
+# violation" added to facilitate the feature, whereby a
+# not-visible-to-our-snapshot tuple can be updated by our command all the same.
+# This is generally needed to provide a guarantee of a successful INSERT or
+# UPDATE in READ COMMITTED mode. This MVCC violation is quite distinct from
+# the putative "MVCC violation" that has existed in PostgreSQL for many years,
+# the EvalPlanQual() mechanism, because that mechanism always starts from a
+# tuple that is visible to the command's MVCC snapshot. This test illustrates
+# a slightly distinct user-visible consequence of the same MVCC violation
+# generally associated with INSERT...ON CONFLICT DO UPDATE. The impact of the
+# MVCC violation goes a little beyond updating MVCC-invisible tuples.
+#
+# With INSERT...ON CONFLICT DO UPDATE, the UPDATE predicate is only evaluated
+# once, on this conclusively-locked tuple, and not any other version of the
+# same tuple. It is therefore possible (in READ COMMITTED mode) that the
+# predicate "fail to be satisfied" according to the command's MVCC snapshot.
+# It might simply be that there is no row version visible, but it's also
+# possible that there is some row version visible, but only as a version that
+# doesn't satisfy the predicate. If, however, the conclusively-locked version
+# satisfies the predicate, that's good enough, and the tuple is updated. The
+# MVCC-snapshot-visible row version is denied the opportunity to prevent the
+# UPDATE from taking place, because we don't walk the UPDATE chain in the usual
+# way.
+
+setup
+{
+ CREATE TABLE colors (key int4 PRIMARY KEY, color text, is_active boolean);
+ INSERT INTO colors (key, color, is_active) VALUES(1, 'Red', false);
+ INSERT INTO colors (key, color, is_active) VALUES(2, 'Green', false);
+ INSERT INTO colors (key, color, is_active) VALUES(3, 'Blue', false);
+}
+
+teardown
+{
+ DROP TABLE colors;
+}
+
+session s1
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step insert1 {
+ WITH t AS (
+ INSERT INTO colors(key, color, is_active)
+ VALUES(1, 'Brown', true), (2, 'Gray', true)
+ ON CONFLICT (key) DO UPDATE
+ SET color = EXCLUDED.color
+ WHERE colors.is_active)
+ SELECT * FROM colors ORDER BY key;}
+step select1surprise { SELECT * FROM colors ORDER BY key; }
+step c1 { COMMIT; }
+
+session s2
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step update2 { UPDATE colors SET is_active = true WHERE key = 1; }
+step c2 { COMMIT; }
+
+# Perhaps surprisingly, the session 1 MVCC-snapshot-visible tuple (the tuple
+# with the pre-populated color 'Red') is denied the opportunity to prevent the
+# UPDATE from taking place -- only the conclusively-locked tuple version
+# matters, and so the tuple with key value 1 was updated to 'Brown' (but not
+# the tuple with key value 2, since nothing changed there):
+permutation update2 insert1 c2 select1surprise c1
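+
+# For illustration only, not part of the test: without any concurrency, the
+# WHERE clause attached to DO UPDATE is an ordinary predicate on the existing
+# (conflicting) row, e.g. in a single session with the table above:
+#
+#   INSERT INTO colors VALUES (1, 'Brown', true)
+#     ON CONFLICT (key) DO UPDATE SET color = EXCLUDED.color
+#     WHERE colors.is_active;
+#   -- is_active is false for key 1 in the setup, so nothing is updated and
+#   -- zero rows are reported as affected
+#
+# The permutation above shows that after a concurrent update the predicate is
+# instead checked against the conclusively-locked row version, which is why
+# the same kind of command can succeed there.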
diff --git a/src/test/isolation/specs/insert-conflict-do-update.spec b/src/test/isolation/specs/insert-conflict-do-update.spec
new file mode 100644
index 0000000..62cdafd
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-do-update.spec
@@ -0,0 +1,39 @@
+# INSERT...ON CONFLICT DO UPDATE test
+#
+# This test tries to expose problems with the interaction between concurrent
+# sessions.
+
+setup
+{
+ CREATE TABLE upsert (key int primary key, val text);
+}
+
+teardown
+{
+ DROP TABLE upsert;
+}
+
+session s1
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step insert1 { INSERT INTO upsert(key, val) VALUES(1, 'insert1') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert1'; }
+step c1 { COMMIT; }
+step a1 { ABORT; }
+
+session s2
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step insert2 { INSERT INTO upsert(key, val) VALUES(1, 'insert2') ON CONFLICT (key) DO UPDATE set val = upsert.val || ' updated by insert2'; }
+step select2 { SELECT * FROM upsert; }
+step c2 { COMMIT; }
+
+# One session (session 2) block-waits on another (session 1) to determine if it
+# should proceed with an insert or update. Notably, this entails updating a
+# tuple while there is no version of that tuple visible to the updating
+# session's snapshot. This is permitted only in READ COMMITTED mode.
+permutation insert1 insert2 c1 select2 c2
+permutation insert1 insert2 a1 select2 c2
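+
+# For illustration only, not part of the test: in the DO UPDATE action the
+# existing row is referenced through the table name and the proposed row
+# through EXCLUDED, e.g. in a single session with the same table:
+#
+#   INSERT INTO upsert VALUES (1, 'a') ON CONFLICT (key) DO UPDATE SET val = EXCLUDED.val;
+#   INSERT INTO upsert VALUES (1, 'b') ON CONFLICT (key) DO UPDATE
+#     SET val = upsert.val || ' then ' || EXCLUDED.val;
+#   SELECT val FROM upsert WHERE key = 1;   -- 'a then b'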
diff --git a/src/test/isolation/specs/insert-conflict-specconflict.spec b/src/test/isolation/specs/insert-conflict-specconflict.spec
new file mode 100644
index 0000000..0d55a01
--- /dev/null
+++ b/src/test/isolation/specs/insert-conflict-specconflict.spec
@@ -0,0 +1,259 @@
+# INSERT ... ON CONFLICT test verifying that speculative insertion
+# failures are handled
+#
+# It does this by using advisory locks to control the progress of
+# insertions.  By waiting while the index keys are being built, it is
+# possible to schedule concurrent INSERT ... ON CONFLICT commands so that
+# there will always be a speculative conflict.
+
+setup
+{
+ CREATE OR REPLACE FUNCTION blurt_and_lock_123(text) RETURNS text IMMUTABLE LANGUAGE plpgsql AS $$
+ BEGIN
+ RAISE NOTICE 'blurt_and_lock_123() called for % in session %', $1, current_setting('spec.session')::int;
+
+ -- depending on lock state, wait for lock 2 or 3
+ IF pg_try_advisory_xact_lock(current_setting('spec.session')::int, 1) THEN
+ RAISE NOTICE 'acquiring advisory lock on 2';
+ PERFORM pg_advisory_xact_lock(current_setting('spec.session')::int, 2);
+ ELSE
+ RAISE NOTICE 'acquiring advisory lock on 3';
+ PERFORM pg_advisory_xact_lock(current_setting('spec.session')::int, 3);
+ END IF;
+ RETURN $1;
+ END;$$;
+
+ CREATE OR REPLACE FUNCTION blurt_and_lock_4(text) RETURNS text IMMUTABLE LANGUAGE plpgsql AS $$
+ BEGIN
+ RAISE NOTICE 'blurt_and_lock_4() called for % in session %', $1, current_setting('spec.session')::int;
+ RAISE NOTICE 'acquiring advisory lock on 4';
+ PERFORM pg_advisory_xact_lock(current_setting('spec.session')::int, 4);
+ RETURN $1;
+ END;$$;
+
+ CREATE OR REPLACE FUNCTION ctoast_large_val() RETURNS TEXT LANGUAGE SQL AS 'select array_agg(md5(g::text))::text from generate_series(1, 256) g';
+
+ CREATE TABLE upserttest(key text, data text);
+
+ CREATE UNIQUE INDEX upserttest_key_uniq_idx ON upserttest((blurt_and_lock_123(key)));
+}
+
+teardown
+{
+ DROP TABLE upserttest;
+}
+
+session controller
+setup
+{
+ SET default_transaction_isolation = 'read committed';
+}
+step controller_locks {SELECT pg_advisory_lock(sess, lock), sess, lock FROM generate_series(1, 2) a(sess), generate_series(1,3) b(lock);}
+step controller_unlock_1_1 { SELECT pg_advisory_unlock(1, 1); }
+step controller_unlock_2_1 { SELECT pg_advisory_unlock(2, 1); }
+step controller_unlock_1_2 { SELECT pg_advisory_unlock(1, 2); }
+step controller_unlock_2_2 { SELECT pg_advisory_unlock(2, 2); }
+step controller_unlock_1_3 { SELECT pg_advisory_unlock(1, 3); }
+step controller_unlock_2_3 { SELECT pg_advisory_unlock(2, 3); }
+step controller_lock_2_4 { SELECT pg_advisory_lock(2, 4); }
+step controller_unlock_2_4 { SELECT pg_advisory_unlock(2, 4); }
+step controller_show {SELECT * FROM upserttest; }
+step controller_show_count {SELECT COUNT(*) FROM upserttest; }
+step controller_print_speculative_locks {
+ SELECT pa.application_name, locktype, mode, granted
+ FROM pg_locks pl JOIN pg_stat_activity pa USING (pid)
+ WHERE
+ locktype IN ('spectoken', 'transactionid')
+ AND pa.datname = current_database()
+ AND pa.application_name LIKE 'isolation/insert-conflict-specconflict/s%'
+ ORDER BY 1, 2, 3, 4;
+}
+
+session s1
+setup
+{
+ SET default_transaction_isolation = 'read committed';
+ SET spec.session = 1;
+}
+step s1_begin { BEGIN; }
+step s1_create_non_unique_index { CREATE INDEX upserttest_key_idx ON upserttest((blurt_and_lock_4(key))); }
+step s1_confirm_index_order { SELECT 'upserttest_key_uniq_idx'::regclass::int8 < 'upserttest_key_idx'::regclass::int8; }
+step s1_upsert { INSERT INTO upserttest(key, data) VALUES('k1', 'inserted s1') ON CONFLICT (blurt_and_lock_123(key)) DO UPDATE SET data = upserttest.data || ' with conflict update s1'; }
+step s1_insert_toast { INSERT INTO upserttest VALUES('k2', ctoast_large_val()) ON CONFLICT DO NOTHING; }
+step s1_commit { COMMIT; }
+step s1_noop { }
+
+session s2
+setup
+{
+ SET default_transaction_isolation = 'read committed';
+ SET spec.session = 2;
+}
+step s2_begin { BEGIN; }
+step s2_upsert { INSERT INTO upserttest(key, data) VALUES('k1', 'inserted s2') ON CONFLICT (blurt_and_lock_123(key)) DO UPDATE SET data = upserttest.data || ' with conflict update s2'; }
+step s2_insert_toast { INSERT INTO upserttest VALUES('k2', ctoast_large_val()) ON CONFLICT DO NOTHING; }
+step s2_commit { COMMIT; }
+step s2_noop { }
+
+# Test that speculative locks are correctly acquired and released, s2
+# inserts, s1 updates.
+permutation
+ # acquire a number of locks, to control execution flow - the
+ # blurt_and_lock_123 function acquires advisory locks that allow us to
+ # continue after a) the optimistic conflict probe b) after the
+ # insertion of the speculative tuple.
+ controller_locks
+ controller_show
+ s1_upsert s2_upsert
+ controller_show
+ # Switch both sessions to wait on the other lock next time (the speculative insertion)
+ controller_unlock_1_1 controller_unlock_2_1
+ # Allow both sessions to continue
+ controller_unlock_1_3 controller_unlock_2_3
+ controller_show
+ # Allow the second session to finish insertion
+ controller_unlock_2_2
+ # This should now show a successful insertion
+ controller_show
+ # Allow the first session to finish insertion
+ controller_unlock_1_2
+ # This should now show a successful UPSERT
+ controller_show
+
+# Test that speculative locks are correctly acquired and released, s1
+# inserts, s2 updates.
+permutation
+ # acquire a number of locks, to control execution flow - the
+ # blurt_and_lock_123 function acquires advisory locks that allow us to
+ # continue after a) the optimistic conflict probe b) after the
+ # insertion of the speculative tuple.
+ controller_locks
+ controller_show
+ s1_upsert s2_upsert
+ controller_show
+ # Switch both sessions to wait on the other lock next time (the speculative insertion)
+ controller_unlock_1_1 controller_unlock_2_1
+ # Allow both sessions to continue
+ controller_unlock_1_3 controller_unlock_2_3
+ controller_show
+ # Allow the first session to finish insertion
+ controller_unlock_1_2
+ # This should now show a successful insertion
+ controller_show
+ # Allow the second session to finish insertion
+ controller_unlock_2_2
+ # This should now show a successful UPSERT
+ controller_show
+
+# Test that speculatively inserted toast rows do not cause conflicts.
+# s1 inserts successfully, s2 does not.
+permutation
+ # acquire a number of locks, to control execution flow - the
+ # blurt_and_lock_123 function acquires advisory locks that allow us to
+ # continue after a) the optimistic conflict probe b) after the
+ # insertion of the speculative tuple.
+ controller_locks
+ controller_show
+ s1_insert_toast s2_insert_toast
+ controller_show
+ # Switch both sessions to wait on the other lock next time (the speculative insertion)
+ controller_unlock_1_1 controller_unlock_2_1
+ # Allow both sessions to continue
+ controller_unlock_1_3 controller_unlock_2_3
+ controller_show
+ # Allow the first session to finish insertion
+ controller_unlock_1_2
+ # This should now show that 1 additional tuple was inserted successfully
+ controller_show_count
+ # Allow the second session to finish insertion and kill the speculatively inserted tuple
+ controller_unlock_2_2
+ # This should show the same number of tuples as before s2 inserted
+ controller_show_count
+
+# Test that speculative locks are correctly acquired and released, s2
+# inserts, s1 updates. With the added complication that transactions
+# don't immediately commit.
+permutation
+ # acquire a number of locks, to control execution flow - the
+ # blurt_and_lock_123 function acquires advisory locks that allow us to
+ # continue after a) the optimistic conflict probe b) after the
+ # insertion of the speculative tuple.
+ controller_locks
+ controller_show
+ s1_begin s2_begin
+ s1_upsert s2_upsert
+ controller_show
+ # Switch both sessions to wait on the other lock next time (the speculative insertion)
+ controller_unlock_1_1 controller_unlock_2_1
+ # Allow both sessions to continue
+ controller_unlock_1_3 controller_unlock_2_3
+ controller_show
+ # Allow the first session to finish insertion
+ controller_unlock_1_2
+ # But the change isn't visible yet, nor should the second session continue
+ controller_show
+ # Allow the second session to finish insertion, but it's blocked
+ controller_unlock_2_2
+ controller_show
+ # But committing should unblock
+ s1_commit
+ controller_show
+ s2_commit
+ controller_show
+
+# Test that speculative wait is performed if a session sees a speculatively
+# inserted tuple. A speculatively inserted tuple is one which has been inserted
+# both into the table and the unique index but has yet to *complete* the
+# speculative insertion
+permutation
+ # acquire a number of advisory locks to control execution flow - the
+ # blurt_and_lock_123 function acquires advisory locks that allow us to
+ # continue after a) the optimistic conflict probe and b) after the
+ # insertion of the speculative tuple.
+ # blurt_and_lock_4 acquires an advisory lock which allows us to pause
+ # execution c) before completing the speculative insertion
+
+ # create the second index here to avoid affecting the other
+ # permutations.
+ s1_create_non_unique_index
+ # confirm that the insertion into the unique index will happen first
+ s1_confirm_index_order
+ controller_locks
+ controller_show
+ s2_begin
+ # Both sessions wait on advisory locks
+ # (but don't show s2_upsert as complete till we've seen all of s1's notices)
+ s1_upsert s2_upsert (s1_upsert notices 10)
+ controller_show
+ # Switch both sessions to wait on the other lock next time (the speculative insertion)
+ controller_unlock_1_1 controller_unlock_2_1
+ # Allow both sessions to do the optimistic conflict probe and do the
+ # speculative insertion into the table
+ # They will then be waiting on another advisory lock when they attempt to
+ # update the index
+ controller_unlock_1_3 controller_unlock_2_3
+ controller_show
+ # take lock to block second session after inserting in unique index but
+ # before completing the speculative insert
+ controller_lock_2_4
+ # Allow the second session to move forward
+ controller_unlock_2_2
+ # This should still not show a successful insertion
+ controller_show
+ # Allow the first session to continue, it should perform speculative wait
+ controller_unlock_1_2
+ # Should report s1 is waiting on speculative lock
+ controller_print_speculative_locks
+ # Allow s2 to insert into the non-unique index and complete. s1 will
+ # no longer wait on speculative lock, but proceed to wait on the
+ # transaction to finish. The no-op step is needed to ensure that
+ # we don't advance to the reporting step until s2_upsert has completed.
+ controller_unlock_2_4 s2_noop
+ # Should report that s1 is now waiting for s2 to commit
+ controller_print_speculative_locks
+ # Once s2 commits, s1 is finally free to continue to update
+ s2_commit s1_noop
+ # This should now show a successful UPSERT
+ controller_show
+ # Ensure no unexpected locks survive
+ controller_print_speculative_locks
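+
+# For illustration only, not part of the test (hypothetical names): the
+# control mechanism relies on the fact that an expression index evaluates its
+# function for every row being inserted, so a function that waits on an
+# advisory lock pauses the INSERT at a well-defined point.  The function must
+# be declared IMMUTABLE to be usable in an index expression, even though it
+# plainly is not; blurt_and_lock_123() above uses the same trick:
+#
+#   CREATE FUNCTION wait_for_controller(v text) RETURNS text IMMUTABLE
+#     LANGUAGE plpgsql AS $$
+#     BEGIN PERFORM pg_advisory_xact_lock(1, 2); RETURN v; END; $$;
+#   CREATE TABLE t (k text);
+#   CREATE UNIQUE INDEX ON t ((wait_for_controller(k)));
+#   -- controller session: SELECT pg_advisory_lock(1, 2);
+#   INSERT INTO t VALUES ('x');   -- blocks while forming the index entry,
+#                                 -- until the controller unlocks (1, 2)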
diff --git a/src/test/isolation/specs/lock-committed-keyupdate.spec b/src/test/isolation/specs/lock-committed-keyupdate.spec
new file mode 100644
index 0000000..487f0e0
--- /dev/null
+++ b/src/test/isolation/specs/lock-committed-keyupdate.spec
@@ -0,0 +1,66 @@
+# Test locking of a tuple with a committed key-update. In this case,
+# the update conflicts with the lock, so failures are expected, except
+# in READ COMMITTED isolation mode.
+#
+# Some permutations that work fine in the lock-committed-update test are
+# commented out here, because in this case the update blocks.
+
+setup
+{
+ DROP TABLE IF EXISTS lcku_table;
+ CREATE TABLE lcku_table (id INTEGER, value TEXT, PRIMARY KEY (id) INCLUDE (value));
+ INSERT INTO lcku_table VALUES (1, 'one');
+ INSERT INTO lcku_table VALUES (3, 'two');
+}
+
+teardown
+{
+ DROP TABLE lcku_table;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1l { SELECT pg_advisory_lock(578902068); }
+step s1u { UPDATE lcku_table SET id = 2 WHERE id = 3; }
+step s1hint { SELECT * FROM lcku_table; }
+step s1ul { SELECT pg_advisory_unlock(578902068); }
+step s1c { COMMIT; }
+teardown { SELECT pg_advisory_unlock_all(); }
+
+session s2
+step s2b1 { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s2b2 { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s2b3 { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step s2l { SELECT * FROM lcku_table WHERE pg_advisory_lock(578902068) IS NOT NULL FOR KEY SHARE; }
+step s2c { COMMIT; }
+teardown { SELECT pg_advisory_unlock_all(); }
+
+permutation s1b s2b1 s1l s2l s1u s1c s1ul s2c
+permutation s1b s2b1 s1l s1u s2l s1c s1ul s2c
+#permutation s1b s2b1 s1l s2l s1ul s1u s1c s2c
+permutation s1b s2b1 s1l s1u s1ul s2l s1c s2c
+
+permutation s1b s2b1 s1l s2l s1u s1c s1hint s1ul s2c
+permutation s1b s2b1 s1l s1u s2l s1c s1hint s1ul s2c
+#permutation s1b s2b1 s1l s2l s1ul s1u s1c s1hint s2c
+permutation s1b s2b1 s1l s1u s1ul s2l s1c s1hint s2c
+
+permutation s1b s2b2 s1l s2l s1u s1c s1ul s2c
+permutation s1b s2b2 s1l s1u s2l s1c s1ul s2c
+#permutation s1b s2b2 s1l s2l s1ul s1u s1c s2c
+permutation s1b s2b2 s1l s1u s1ul s2l s1c s2c
+
+permutation s1b s2b2 s1l s2l s1u s1c s1hint s1ul s2c
+permutation s1b s2b2 s1l s1u s2l s1c s1hint s1ul s2c
+#permutation s1b s2b2 s1l s2l s1ul s1u s1c s1hint s2c
+permutation s1b s2b2 s1l s1u s1ul s2l s1c s1hint s2c
+
+permutation s1b s2b3 s1l s2l s1u s1c s1ul s2c
+permutation s1b s2b3 s1l s1u s2l s1c s1ul s2c
+#permutation s1b s2b3 s1l s2l s1ul s1u s1c s2c
+permutation s1b s2b3 s1l s1u s1ul s2l s1c s2c
+
+permutation s1b s2b3 s1l s2l s1u s1c s1hint s1ul s2c
+permutation s1b s2b3 s1l s1u s2l s1c s1hint s1ul s2c
+#permutation s1b s2b3 s1l s2l s1ul s1u s1c s1hint s2c
+permutation s1b s2b3 s1l s1u s1ul s2l s1c s1hint s2c
diff --git a/src/test/isolation/specs/lock-committed-update.spec b/src/test/isolation/specs/lock-committed-update.spec
new file mode 100644
index 0000000..74d80d5
--- /dev/null
+++ b/src/test/isolation/specs/lock-committed-update.spec
@@ -0,0 +1,62 @@
+# Test locking of a tuple with a committed update. When the lock does not
+# conflict with the update, no blocking and no serializability errors should
+# occur.
+
+setup
+{
+ DROP TABLE IF EXISTS lcu_table;
+ CREATE TABLE lcu_table (id INTEGER PRIMARY KEY, value TEXT);
+ INSERT INTO lcu_table VALUES (1, 'one');
+}
+
+teardown
+{
+ DROP TABLE lcu_table;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1l { SELECT pg_advisory_lock(380170116); }
+step s1u { UPDATE lcu_table SET value = 'two' WHERE id = 1; }
+step s1hint { SELECT * FROM lcu_table; }
+step s1ul { SELECT pg_advisory_unlock(380170116); }
+step s1c { COMMIT; }
+teardown { SELECT pg_advisory_unlock_all(); }
+
+session s2
+step s2b1 { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s2b2 { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s2b3 { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step s2l { SELECT * FROM lcu_table WHERE pg_advisory_lock(380170116) IS NOT NULL FOR KEY SHARE; }
+step s2c { COMMIT; }
+teardown { SELECT pg_advisory_unlock_all(); }
+
+permutation s1b s2b1 s1l s2l s1u s1c s1ul s2c
+permutation s1b s2b1 s1l s1u s2l s1c s1ul s2c
+permutation s1b s2b1 s1l s2l s1ul s1u s1c s2c
+permutation s1b s2b1 s1l s1u s1ul s2l s1c s2c
+
+permutation s1b s2b1 s1l s2l s1u s1c s1hint s1ul s2c
+permutation s1b s2b1 s1l s1u s2l s1c s1hint s1ul s2c
+permutation s1b s2b1 s1l s2l s1ul s1u s1c s1hint s2c
+permutation s1b s2b1 s1l s1u s1ul s2l s1c s1hint s2c
+
+permutation s1b s2b2 s1l s2l s1u s1c s1ul s2c
+permutation s1b s2b2 s1l s1u s2l s1c s1ul s2c
+permutation s1b s2b2 s1l s2l s1ul s1u s1c s2c
+permutation s1b s2b2 s1l s1u s1ul s2l s1c s2c
+
+permutation s1b s2b2 s1l s2l s1u s1c s1hint s1ul s2c
+permutation s1b s2b2 s1l s1u s2l s1c s1hint s1ul s2c
+permutation s1b s2b2 s1l s2l s1ul s1u s1c s1hint s2c
+permutation s1b s2b2 s1l s1u s1ul s2l s1c s1hint s2c
+
+permutation s1b s2b3 s1l s2l s1u s1c s1ul s2c
+permutation s1b s2b3 s1l s1u s2l s1c s1ul s2c
+permutation s1b s2b3 s1l s2l s1ul s1u s1c s2c
+permutation s1b s2b3 s1l s1u s1ul s2l s1c s2c
+
+permutation s1b s2b3 s1l s2l s1u s1c s1hint s1ul s2c
+permutation s1b s2b3 s1l s1u s2l s1c s1hint s1ul s2c
+permutation s1b s2b3 s1l s2l s1ul s1u s1c s1hint s2c
+permutation s1b s2b3 s1l s1u s1ul s2l s1c s1hint s2c
diff --git a/src/test/isolation/specs/lock-update-delete.spec b/src/test/isolation/specs/lock-update-delete.spec
new file mode 100644
index 0000000..b9dd7d1
--- /dev/null
+++ b/src/test/isolation/specs/lock-update-delete.spec
@@ -0,0 +1,61 @@
+# This test verifies the behavior when an update chain is traversed while
+# locking an old version of the tuple.  There are three tests here:
+# 1. update the tuple, then delete it; a second transaction locks the
+# first version. This should raise an error if the DELETE succeeds,
+# but be allowed to continue if it aborts.
+# 2. Same as (1), except that instead of deleting the tuple, we merely
+# update its key. The behavior should be the same as for (1).
+# 3. Same as (2), except that we update the tuple without modifying its
+# key. In this case, no error should be raised.
+# When run in REPEATABLE READ or SERIALIZABLE transaction isolation levels, all
+# permutations that commit s2 cause a serializability error; all permutations
+# that rollback s2 can get through.
+#
+# We use an advisory lock (which is locked during s1's setup) to let s2 obtain
+# its snapshot early and only allow it to actually traverse the update chain
+# when s1 is done creating it.
+
+setup
+{
+ DROP TABLE IF EXISTS foo;
+ CREATE TABLE foo (
+ key int PRIMARY KEY,
+ value int
+ );
+
+ INSERT INTO foo VALUES (1, 1);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+# obtain lock on the tuple, traversing its update chain
+step s1l { SELECT * FROM foo WHERE pg_advisory_xact_lock(0) IS NOT NULL AND key = 1 FOR KEY SHARE; }
+
+session s2
+setup { SELECT pg_advisory_lock(0); }
+step s2b { BEGIN; }
+step s2u { UPDATE foo SET value = 2 WHERE key = 1; }
+step s2_blocker1 { DELETE FROM foo; }
+step s2_blocker2 { UPDATE foo SET key = 2 WHERE key = 1; }
+step s2_blocker3 { UPDATE foo SET value = 2 WHERE key = 1; }
+step s2_unlock { SELECT pg_advisory_unlock(0); }
+step s2c { COMMIT; }
+step s2r { ROLLBACK; }
+
+permutation s2b s1l s2u s2_blocker1 s2_unlock s2c
+permutation s2b s1l s2u s2_blocker2 s2_unlock s2c
+permutation s2b s1l s2u s2_blocker3 s2_unlock s2c
+permutation s2b s1l s2u s2_blocker1 s2_unlock s2r
+permutation s2b s1l s2u s2_blocker2 s2_unlock s2r
+permutation s2b s1l s2u s2_blocker3 s2_unlock s2r
+
+permutation s2b s1l s2u s2_blocker1 s2c s2_unlock
+permutation s2b s1l s2u s2_blocker2 s2c s2_unlock
+permutation s2b s1l s2u s2_blocker3 s2c s2_unlock
+permutation s2b s1l s2u s2_blocker1 s2r s2_unlock
+permutation s2b s1l s2u s2_blocker2 s2r s2_unlock
+permutation s2b s1l s2u s2_blocker3 s2r s2_unlock
diff --git a/src/test/isolation/specs/lock-update-traversal.spec b/src/test/isolation/specs/lock-update-traversal.spec
new file mode 100644
index 0000000..9d3d32d
--- /dev/null
+++ b/src/test/isolation/specs/lock-update-traversal.spec
@@ -0,0 +1,39 @@
+# When a tuple that has been updated is locked, the locking command must
+# traverse the update chain; thus, a DELETE (on the newer version of the tuple)
+# should not be able to proceed until the lock has been released. An UPDATE
+# that changes the key should not be allowed to continue either; but an UPDATE
+# that doesn't modify the key should be able to continue immediately.
+
+setup
+{
+ CREATE TABLE foo (
+ key int,
+ value int,
+ PRIMARY KEY (key) INCLUDE (value)
+ );
+
+ INSERT INTO foo VALUES (1, 1);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+step s1b { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s1s { SELECT * FROM foo; } # obtain snapshot
+step s1l { SELECT * FROM foo FOR KEY SHARE; } # obtain lock
+step s1c { COMMIT; }
+
+session s2
+step s2b { BEGIN; }
+step s2u { UPDATE foo SET value = 2 WHERE key = 1; }
+step s2c { COMMIT; }
+step s2d1 { DELETE FROM foo WHERE key = 1; }
+step s2d2 { UPDATE foo SET key = 3 WHERE key = 1; }
+step s2d3 { UPDATE foo SET value = 3 WHERE key = 1; }
+
+permutation s1b s2b s1s s2u s1l s2c s2d1 s1c
+permutation s1b s2b s1s s2u s1l s2c s2d2 s1c
+permutation s1b s2b s1s s2u s1l s2c s2d3 s1c
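+
+# For illustration only, not part of the test: the same conflict rules can be
+# seen in two plain psql sessions on this table, without the snapshot step:
+#
+#   -- session A
+#   BEGIN;
+#   SELECT * FROM foo FOR KEY SHARE;
+#
+#   -- session B
+#   UPDATE foo SET value = 2 WHERE key = 1;   -- proceeds: the key is unchanged
+#   UPDATE foo SET key = 3 WHERE key = 1;     -- blocks until A commits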
diff --git a/src/test/isolation/specs/matview-write-skew.spec b/src/test/isolation/specs/matview-write-skew.spec
new file mode 100644
index 0000000..5fe21f1
--- /dev/null
+++ b/src/test/isolation/specs/matview-write-skew.spec
@@ -0,0 +1,51 @@
+# Test write skew with a materialized view.
+#
+# This test uses two serializable transactions: one that refreshes a
+# materialized view containing a summary of some order information, and
+# one that looks at the materialized view while doing writes on its
+# parent relation.
+#
+# Any overlap between the transactions should cause a serialization failure.
+
+setup
+{
+ CREATE TABLE orders (date date, item text, num int);
+ INSERT INTO orders VALUES ('2022-04-01', 'apple', 10), ('2022-04-01', 'banana', 20);
+
+ CREATE MATERIALIZED VIEW order_summary AS
+ SELECT date, item, sum(num) FROM orders GROUP BY date, item;
+ CREATE UNIQUE INDEX ON order_summary(date, item);
+ -- Create a diff between the summary table and the parent orders.
+ INSERT INTO orders VALUES ('2022-04-02', 'apple', 20);
+}
+
+teardown
+{
+ DROP MATERIALIZED VIEW order_summary;
+ DROP TABLE orders;
+}
+
+session s1
+step s1_begin { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step s1_refresh { REFRESH MATERIALIZED VIEW CONCURRENTLY order_summary; }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_begin { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step s2_read { SELECT max(date) FROM order_summary; }
+step s2_insert { INSERT INTO orders VALUES ('2022-04-02', 'orange', 15); }
+step s2_update { UPDATE orders SET num = num + 1; }
+step s2_commit { COMMIT; }
+
+# refresh -> read -> write
+permutation "s1_begin" "s2_begin" "s1_refresh" "s2_read" "s2_insert" "s1_commit" "s2_commit"
+permutation "s1_begin" "s2_begin" "s1_refresh" "s2_read" "s2_update" "s1_commit" "s2_commit"
+# read -> refresh -> write
+permutation "s1_begin" "s2_begin" "s2_read" "s1_refresh" "s2_insert" "s1_commit" "s2_commit"
+permutation "s1_begin" "s2_begin" "s2_read" "s1_refresh" "s2_update" "s1_commit" "s2_commit"
+# read -> write -> refresh
+permutation "s1_begin" "s2_begin" "s2_read" "s2_insert" "s1_refresh" "s1_commit" "s2_commit"
+permutation "s1_begin" "s2_begin" "s2_read" "s2_update" "s1_refresh" "s1_commit" "s2_commit"
+# refresh -> write -> read
+permutation "s1_begin" "s2_begin" "s1_refresh" "s2_insert" "s2_read" "s1_commit" "s2_commit"
+permutation "s1_begin" "s2_begin" "s1_refresh" "s2_update" "s2_read" "s1_commit" "s2_commit"
diff --git a/src/test/isolation/specs/merge-delete.spec b/src/test/isolation/specs/merge-delete.spec
new file mode 100644
index 0000000..ba5f70e
--- /dev/null
+++ b/src/test/isolation/specs/merge-delete.spec
@@ -0,0 +1,96 @@
+# MERGE DELETE
+#
+# This test looks at the interactions involving concurrent deletes,
+# comparing the behavior of MERGE, DELETE and UPDATE.
+
+setup
+{
+ CREATE TABLE target (key int primary key, val text);
+ INSERT INTO target VALUES (1, 'setup1');
+
+ CREATE TABLE target_pa (key int primary key, val text) PARTITION BY LIST (key);
+ CREATE TABLE target_pa1 PARTITION OF target_pa FOR VALUES IN (1);
+ CREATE TABLE target_pa2 PARTITION OF target_pa FOR VALUES IN (2);
+ INSERT INTO target_pa VALUES (1, 'setup1');
+
+ CREATE TABLE target_tg (key int primary key, val text);
+ CREATE FUNCTION target_tg_trig_fn() RETURNS trigger LANGUAGE plpgsql AS
+ $$
+ BEGIN
+ IF tg_op = 'INSERT' THEN
+ RAISE NOTICE 'Insert: %', NEW;
+ RETURN NEW;
+ ELSIF tg_op = 'UPDATE' THEN
+ RAISE NOTICE 'Update: % -> %', OLD, NEW;
+ RETURN NEW;
+ ELSE
+ RAISE NOTICE 'Delete: %', OLD;
+ RETURN OLD;
+ END IF;
+ END
+ $$;
+ CREATE TRIGGER target_tg_trig BEFORE INSERT OR UPDATE OR DELETE ON target_tg
+ FOR EACH ROW EXECUTE FUNCTION target_tg_trig_fn();
+ INSERT INTO target_tg VALUES (1, 'setup1');
+}
+
+teardown
+{
+ DROP TABLE target;
+ DROP TABLE target_pa;
+ DROP TABLE target_tg;
+ DROP FUNCTION target_tg_trig_fn;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "delete" { DELETE FROM target t WHERE t.key = 1; }
+step "delete_pa" { DELETE FROM target_pa t WHERE t.key = 1; }
+step "delete_tg" { DELETE FROM target_tg t WHERE t.key = 1; }
+step "c1" { COMMIT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "update2" { UPDATE target t SET val = t.val || ' updated by update2' WHERE t.key = 1; }
+step "update2_pa" { UPDATE target_pa t SET val = t.val || ' updated by update2_pa' WHERE t.key = 1; }
+step "update2_tg" { UPDATE target_tg t SET val = t.val || ' updated by update2_tg' WHERE t.key = 1; }
+step "merge2" { MERGE INTO target t USING (SELECT 1 as key, 'merge2' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val; }
+step "merge2_pa" { MERGE INTO target_pa t USING (SELECT 1 as key, 'merge2_pa' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val; }
+step "merge2_tg" { MERGE INTO target_tg t USING (SELECT 1 as key, 'merge2_tg' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val; }
+step "merge_delete2" { MERGE INTO target t USING (SELECT 1 as key, 'merge_delete2' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN DELETE; }
+step "merge_delete2_tg" { MERGE INTO target_tg t USING (SELECT 1 as key, 'merge_delete2_tg' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN DELETE; }
+step "select2" { SELECT * FROM target; }
+step "select2_pa" { SELECT * FROM target_pa; }
+step "select2_tg" { SELECT * FROM target_tg; }
+step "c2" { COMMIT; }
+
+# Basic effects
+permutation "delete" "c1" "select2" "c2"
+permutation "delete_pa" "c1" "select2_pa" "c2"
+permutation "delete_tg" "c1" "select2_tg" "c2"
+
+# One after the other, no concurrency
+permutation "delete" "c1" "update2" "select2" "c2"
+permutation "delete_pa" "c1" "update2_pa" "select2_pa" "c2"
+permutation "delete_tg" "c1" "update2_tg" "select2_tg" "c2"
+permutation "delete" "c1" "merge2" "select2" "c2"
+permutation "delete_pa" "c1" "merge2_pa" "select2_pa" "c2"
+permutation "delete_tg" "c1" "merge2_tg" "select2_tg" "c2"
+permutation "delete" "c1" "merge_delete2" "select2" "c2"
+permutation "delete_tg" "c1" "merge_delete2_tg" "select2_tg" "c2"
+
+# Now with concurrency
+permutation "delete" "update2" "c1" "select2" "c2"
+permutation "delete_pa" "update2_pa" "c1" "select2_pa" "c2"
+permutation "delete_tg" "update2_tg" "c1" "select2_tg" "c2"
+permutation "delete" "merge2" "c1" "select2" "c2"
+permutation "delete_pa" "merge2_pa" "c1" "select2_pa" "c2"
+permutation "delete_tg" "merge2_tg" "c1" "select2_tg" "c2"
+permutation "delete" "merge_delete2" "c1" "select2" "c2"
+permutation "delete_tg" "merge_delete2_tg" "c1" "select2_tg" "c2"
diff --git a/src/test/isolation/specs/merge-insert-update.spec b/src/test/isolation/specs/merge-insert-update.spec
new file mode 100644
index 0000000..1bf1ed4
--- /dev/null
+++ b/src/test/isolation/specs/merge-insert-update.spec
@@ -0,0 +1,51 @@
+# MERGE INSERT UPDATE
+#
+# This looks at how we handle concurrent INSERTs, illustrating how the
+# behavior differs from INSERT ... ON CONFLICT
+
+setup
+{
+ CREATE TABLE target (key int primary key, val text);
+}
+
+teardown
+{
+ DROP TABLE target;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "merge1" { MERGE INTO target t USING (SELECT 1 as key, 'merge1' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN UPDATE set val = t.val || ' updated by merge1'; }
+step "delete1" { DELETE FROM target WHERE key = 1; }
+step "insert1" { INSERT INTO target VALUES (1, 'insert1'); }
+step "c1" { COMMIT; }
+step "a1" { ABORT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "merge2" { MERGE INTO target t USING (SELECT 1 as key, 'merge2' as val) s ON s.key = t.key WHEN NOT MATCHED THEN INSERT VALUES (s.key, s.val) WHEN MATCHED THEN UPDATE set val = t.val || ' updated by merge2'; }
+
+step "merge2i" { MERGE INTO target t USING (SELECT 1 as key, 'merge2' as val) s ON s.key = t.key WHEN MATCHED THEN UPDATE set val = t.val || ' updated by merge2'; }
+
+step "select2" { SELECT * FROM target; }
+step "c2" { COMMIT; }
+
+# Basic effects
+permutation "merge1" "c1" "select2" "c2"
+permutation "merge1" "c1" "merge2" "select2" "c2"
+
+# check concurrent inserts
+permutation "insert1" "merge2" "c1" "select2" "c2"
+permutation "merge1" "merge2" "c1" "select2" "c2"
+permutation "merge1" "merge2" "a1" "select2" "c2"
+
+# check how we handle when visible row has been concurrently deleted, then same key re-inserted
+permutation "delete1" "insert1" "c1" "merge2" "select2" "c2"
+permutation "delete1" "insert1" "merge2" "c1" "select2" "c2"
+permutation "delete1" "insert1" "merge2i" "c1" "select2" "c2"
diff --git a/src/test/isolation/specs/merge-join.spec b/src/test/isolation/specs/merge-join.spec
new file mode 100644
index 0000000..e33a02c
--- /dev/null
+++ b/src/test/isolation/specs/merge-join.spec
@@ -0,0 +1,45 @@
+# MERGE JOIN
+#
+# This test checks the EPQ recheck mechanism during MERGE when joining to a
+# source table using different join methods, per bug #18103
+
+setup
+{
+ CREATE TABLE src (id int PRIMARY KEY, val int);
+ CREATE TABLE tgt (id int PRIMARY KEY, val int);
+ INSERT INTO src SELECT x, x*10 FROM generate_series(1,3) g(x);
+ INSERT INTO tgt SELECT x, x FROM generate_series(1,3) g(x);
+}
+
+teardown
+{
+ DROP TABLE src, tgt;
+}
+
+session s1
+step b1 { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step m1 { MERGE INTO tgt USING src ON tgt.id = src.id
+ WHEN MATCHED THEN UPDATE SET val = src.val
+ WHEN NOT MATCHED THEN INSERT VALUES (src.id, src.val); }
+step s1 { SELECT * FROM tgt; }
+step c1 { COMMIT; }
+
+session s2
+step b2 { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step hj { SET LOCAL enable_mergejoin = off; SET LOCAL enable_nestloop = off; }
+step mj { SET LOCAL enable_hashjoin = off; SET LOCAL enable_nestloop = off; }
+step nl { SET LOCAL enable_hashjoin = off; SET LOCAL enable_mergejoin = off; }
+step ex { EXPLAIN (verbose, costs off)
+ MERGE INTO tgt USING src ON tgt.id = src.id
+ WHEN MATCHED THEN UPDATE SET val = src.val
+ WHEN NOT MATCHED THEN INSERT VALUES (src.id, src.val); }
+step m2 { MERGE INTO tgt USING src ON tgt.id = src.id
+ WHEN MATCHED THEN UPDATE SET val = src.val
+ WHEN NOT MATCHED THEN INSERT VALUES (src.id, src.val); }
+step s2 { SELECT * FROM tgt; }
+step c2 { COMMIT; }
+
+permutation b1 m1 s1 c1 b2 m2 s2 c2
+permutation b1 b2 m1 hj ex m2 c1 c2 s1
+permutation b1 b2 m1 mj ex m2 c1 c2 s1
+permutation b1 b2 m1 nl ex m2 c1 c2 s1
diff --git a/src/test/isolation/specs/merge-match-recheck.spec b/src/test/isolation/specs/merge-match-recheck.spec
new file mode 100644
index 0000000..298b2bf
--- /dev/null
+++ b/src/test/isolation/specs/merge-match-recheck.spec
@@ -0,0 +1,184 @@
+# MERGE MATCHED RECHECK
+#
+# This test looks at what happens when we have complex
+# WHEN MATCHED AND conditions and a concurrent UPDATE causes a
+# recheck of the AND condition on the new row
+
+setup
+{
+ CREATE TABLE target (key int primary key, balance integer, status text, val text);
+ INSERT INTO target VALUES (1, 160, 's1', 'setup');
+
+ CREATE TABLE target_pa (key int, balance integer, status text, val text) PARTITION BY RANGE (balance);
+ CREATE TABLE target_pa1 PARTITION OF target_pa FOR VALUES FROM (0) TO (200);
+ CREATE TABLE target_pa2 PARTITION OF target_pa FOR VALUES FROM (200) TO (1000);
+ INSERT INTO target_pa VALUES (1, 160, 's1', 'setup');
+
+ CREATE TABLE target_tg (key int primary key, balance integer, status text, val text);
+ CREATE FUNCTION target_tg_trig_fn() RETURNS trigger LANGUAGE plpgsql AS
+ $$
+ BEGIN
+ IF tg_op = 'INSERT' THEN
+ RAISE NOTICE 'Insert: %', NEW;
+ RETURN NEW;
+ ELSIF tg_op = 'UPDATE' THEN
+ RAISE NOTICE 'Update: % -> %', OLD, NEW;
+ RETURN NEW;
+ ELSE
+ RAISE NOTICE 'Delete: %', OLD;
+ RETURN OLD;
+ END IF;
+ END
+ $$;
+ CREATE TRIGGER target_tg_trig BEFORE INSERT OR UPDATE OR DELETE ON target_tg
+ FOR EACH ROW EXECUTE FUNCTION target_tg_trig_fn();
+ INSERT INTO target_tg VALUES (1, 160, 's1', 'setup');
+}
+
+teardown
+{
+ DROP TABLE target;
+ DROP TABLE target_pa;
+ DROP TABLE target_tg;
+ DROP FUNCTION target_tg_trig_fn;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "merge_status"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND status = 's1' THEN
+ UPDATE SET status = 's2', val = t.val || ' when1'
+ WHEN MATCHED AND status = 's2' THEN
+ UPDATE SET status = 's3', val = t.val || ' when2'
+ WHEN MATCHED AND status = 's3' THEN
+ UPDATE SET status = 's4', val = t.val || ' when3';
+}
+step "merge_status_tg"
+{
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND status = 's1' THEN
+ UPDATE SET status = 's2', val = t.val || ' when1'
+ WHEN MATCHED AND status = 's2' THEN
+ UPDATE SET status = 's3', val = t.val || ' when2'
+ WHEN MATCHED AND status = 's3' THEN
+ UPDATE SET status = 's4', val = t.val || ' when3';
+}
+
+step "merge_bal"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3';
+}
+step "merge_bal_pa"
+{
+ MERGE INTO target_pa t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3';
+}
+step "merge_bal_tg"
+{
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ UPDATE SET balance = balance * 4, val = t.val || ' when2'
+ WHEN MATCHED AND balance < 300 THEN
+ UPDATE SET balance = balance * 8, val = t.val || ' when3';
+}
+
+step "merge_delete"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ DELETE;
+}
+step "merge_delete_tg"
+{
+ MERGE INTO target_tg t
+ USING (SELECT 1 as key) s
+ ON s.key = t.key
+ WHEN MATCHED AND balance < 100 THEN
+ UPDATE SET balance = balance * 2, val = t.val || ' when1'
+ WHEN MATCHED AND balance < 200 THEN
+ DELETE;
+}
+
+step "select1" { SELECT * FROM target; }
+step "select1_pa" { SELECT * FROM target_pa; }
+step "select1_tg" { SELECT * FROM target_tg; }
+step "c1" { COMMIT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "update1" { UPDATE target t SET balance = balance + 10, val = t.val || ' updated by update1' WHERE t.key = 1; }
+step "update1_tg" { UPDATE target_tg t SET balance = balance + 10, val = t.val || ' updated by update1_tg' WHERE t.key = 1; }
+step "update2" { UPDATE target t SET status = 's2', val = t.val || ' updated by update2' WHERE t.key = 1; }
+step "update2_tg" { UPDATE target_tg t SET status = 's2', val = t.val || ' updated by update2_tg' WHERE t.key = 1; }
+step "update3" { UPDATE target t SET status = 's3', val = t.val || ' updated by update3' WHERE t.key = 1; }
+step "update3_tg" { UPDATE target_tg t SET status = 's3', val = t.val || ' updated by update3_tg' WHERE t.key = 1; }
+step "update5" { UPDATE target t SET status = 's5', val = t.val || ' updated by update5' WHERE t.key = 1; }
+step "update5_tg" { UPDATE target_tg t SET status = 's5', val = t.val || ' updated by update5_tg' WHERE t.key = 1; }
+step "update_bal1" { UPDATE target t SET balance = 50, val = t.val || ' updated by update_bal1' WHERE t.key = 1; }
+step "update_bal1_pa" { UPDATE target_pa t SET balance = 50, val = t.val || ' updated by update_bal1_pa' WHERE t.key = 1; }
+step "update_bal1_tg" { UPDATE target_tg t SET balance = 50, val = t.val || ' updated by update_bal1_tg' WHERE t.key = 1; }
+step "c2" { COMMIT; }
+
+# merge_status sees concurrently updated row and rechecks WHEN conditions, but recheck passes and final status = 's2'
+permutation "update1" "merge_status" "c2" "select1" "c1"
+permutation "update1_tg" "merge_status_tg" "c2" "select1_tg" "c1"
+
+# merge_status sees concurrently updated row and rechecks WHEN conditions, recheck fails, so final status = 's3' not 's2'
+permutation "update2" "merge_status" "c2" "select1" "c1"
+permutation "update2_tg" "merge_status_tg" "c2" "select1_tg" "c1"
+
+# merge_status sees concurrently updated row and rechecks WHEN conditions, recheck fails, so final status = 's4' not 's2'
+permutation "update3" "merge_status" "c2" "select1" "c1"
+permutation "update3_tg" "merge_status_tg" "c2" "select1_tg" "c1"
+
+# merge_status sees concurrently updated row and rechecks WHEN conditions, recheck fails, but we skip update and MERGE does nothing
+permutation "update5" "merge_status" "c2" "select1" "c1"
+permutation "update5_tg" "merge_status_tg" "c2" "select1_tg" "c1"
+
+# merge_bal sees concurrently updated row and rechecks WHEN conditions, recheck fails, so final balance = 100 not 640
+permutation "update_bal1" "merge_bal" "c2" "select1" "c1"
+permutation "update_bal1_pa" "merge_bal_pa" "c2" "select1_pa" "c1"
+permutation "update_bal1_tg" "merge_bal_tg" "c2" "select1_tg" "c1"
+
+# merge_delete sees concurrently updated row and rechecks WHEN conditions, but recheck passes and row is deleted
+permutation "update1" "merge_delete" "c2" "select1" "c1"
+permutation "update1_tg" "merge_delete_tg" "c2" "select1_tg" "c1"
+
+# merge_delete sees concurrently updated row and rechecks WHEN conditions, recheck fails, so final balance is 100
+permutation "update_bal1" "merge_delete" "c2" "select1" "c1"
+permutation "update_bal1_tg" "merge_delete_tg" "c2" "select1_tg" "c1"
diff --git a/src/test/isolation/specs/merge-update.spec b/src/test/isolation/specs/merge-update.spec
new file mode 100644
index 0000000..e8d0166
--- /dev/null
+++ b/src/test/isolation/specs/merge-update.spec
@@ -0,0 +1,156 @@
+# MERGE UPDATE
+#
+# This test exercises atypical cases
+# 1. UPDATEs of PKs that change the join in the ON clause
+# 2. UPDATEs with WHEN conditions that would fail after concurrent update
+# 3. UPDATEs with extra ON conditions that would fail after concurrent update
+
+setup
+{
+ CREATE TABLE target (key int primary key, val text);
+ INSERT INTO target VALUES (1, 'setup1');
+
+ CREATE TABLE pa_target (key integer, val text)
+ PARTITION BY LIST (key);
+ CREATE TABLE part1 (key integer, val text);
+ CREATE TABLE part2 (val text, key integer);
+ CREATE TABLE part3 (key integer, val text);
+
+ ALTER TABLE pa_target ATTACH PARTITION part1 FOR VALUES IN (1,4);
+ ALTER TABLE pa_target ATTACH PARTITION part2 FOR VALUES IN (2,5,6);
+ ALTER TABLE pa_target ATTACH PARTITION part3 DEFAULT;
+
+ INSERT INTO pa_target VALUES (1, 'initial');
+ INSERT INTO pa_target VALUES (2, 'initial');
+}
+
+teardown
+{
+ DROP TABLE target;
+ DROP TABLE pa_target CASCADE;
+}
+
+session "s1"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "merge1"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key, 'merge1' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+step "pa_merge1"
+{
+ MERGE INTO pa_target t
+ USING (SELECT 1 as key, 'pa_merge1' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set val = t.val || ' updated by ' || s.val;
+}
+step "pa_merge2"
+{
+ MERGE INTO pa_target t
+ USING (SELECT 1 as key, 'pa_merge2' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+step "pa_merge3"
+{
+ MERGE INTO pa_target t
+ USING (SELECT 1 as key, 'pa_merge2' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set val = 'prefix ' || t.val;
+}
+step "c1" { COMMIT; }
+step "a1" { ABORT; }
+
+session "s2"
+setup
+{
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+}
+step "merge2a"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key, 'merge2a' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+step "merge2b"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key, 'merge2b' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED AND t.key < 2 THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+step "merge2c"
+{
+ MERGE INTO target t
+ USING (SELECT 1 as key, 'merge2c' as val) s
+ ON s.key = t.key AND t.key < 2
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+step "pa_merge2a"
+{
+ MERGE INTO pa_target t
+ USING (SELECT 1 as key, 'pa_merge2a' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+# MERGE proceeds only if 'val' unchanged
+step "pa_merge2b_when"
+{
+ MERGE INTO pa_target t
+ USING (SELECT 1 as key, 'pa_merge2b_when' as val) s
+ ON s.key = t.key
+ WHEN NOT MATCHED THEN
+ INSERT VALUES (s.key, s.val)
+ WHEN MATCHED AND t.val like 'initial%' THEN
+ UPDATE set key = t.key + 1, val = t.val || ' updated by ' || s.val;
+}
+step "select2" { SELECT * FROM target; }
+step "pa_select2" { SELECT * FROM pa_target; }
+step "c2" { COMMIT; }
+
+# Basic effects
+permutation "merge1" "c1" "select2" "c2"
+
+# One after the other, no concurrency
+permutation "merge1" "c1" "merge2a" "select2" "c2"
+
+# Now with concurrency
+permutation "merge1" "merge2a" "c1" "select2" "c2"
+permutation "merge1" "merge2a" "a1" "select2" "c2"
+permutation "merge1" "merge2b" "c1" "select2" "c2"
+permutation "merge1" "merge2c" "c1" "select2" "c2"
+permutation "pa_merge1" "pa_merge2a" "c1" "pa_select2" "c2"
+permutation "pa_merge2" "pa_merge2a" "c1" "pa_select2" "c2" # fails
+permutation "pa_merge2" "c1" "pa_merge2a" "pa_select2" "c2" # succeeds
+permutation "pa_merge3" "pa_merge2b_when" "c1" "pa_select2" "c2" # WHEN not satisfied by updated tuple
+permutation "pa_merge1" "pa_merge2b_when" "c1" "pa_select2" "c2" # WHEN satisfied by updated tuple
diff --git a/src/test/isolation/specs/multiple-cic.spec b/src/test/isolation/specs/multiple-cic.spec
new file mode 100644
index 0000000..e34a6b0
--- /dev/null
+++ b/src/test/isolation/specs/multiple-cic.spec
@@ -0,0 +1,43 @@
+# Test multiple CREATE INDEX CONCURRENTLY working simultaneously
+
+setup
+{
+ CREATE TABLE mcic_one (
+ id int
+ );
+ CREATE TABLE mcic_two (
+ id int
+ );
+ CREATE FUNCTION lck_shr(bigint) RETURNS bool IMMUTABLE LANGUAGE plpgsql AS $$
+ BEGIN PERFORM pg_advisory_lock_shared($1); RETURN true; END;
+ $$;
+ CREATE FUNCTION unlck() RETURNS bool IMMUTABLE LANGUAGE plpgsql AS $$
+ BEGIN PERFORM pg_advisory_unlock_all(); RETURN true; END;
+ $$;
+}
+teardown
+{
+ DROP TABLE mcic_one, mcic_two;
+ DROP FUNCTION lck_shr(bigint);
+ DROP FUNCTION unlck();
+}
+
+session s1
+step s1i {
+ CREATE INDEX CONCURRENTLY mcic_one_pkey ON mcic_one (id)
+ WHERE lck_shr(281457);
+ }
+teardown { SELECT unlck(); }
+
+
+session s2
+step s2l { SELECT pg_advisory_lock(281457); }
+step s2i {
+ CREATE INDEX CONCURRENTLY mcic_two_pkey ON mcic_two (id)
+ WHERE unlck();
+ }
+
+# (*) marker ensures that s2i is reported as "waiting", even if it
+# completes very quickly
+
+permutation s2l s1i s2i(*)
diff --git a/src/test/isolation/specs/multiple-row-versions.spec b/src/test/isolation/specs/multiple-row-versions.spec
new file mode 100644
index 0000000..0779ea0
--- /dev/null
+++ b/src/test/isolation/specs/multiple-row-versions.spec
@@ -0,0 +1,47 @@
+# Multiple Row Versions test
+#
+# This test is designed to cover some code paths which only occur with
+# four or more transactions interacting with particular timings.
+#
+# Due to long permutation setup time, we are only testing one specific
+# permutation, which should get a serialization error.
+
+setup
+{
+ CREATE TABLE t (id int NOT NULL, txt text) WITH (fillfactor=50);
+ INSERT INTO t (id)
+ SELECT x FROM (SELECT * FROM generate_series(1, 1000000)) a(x);
+ ALTER TABLE t ADD PRIMARY KEY (id);
+}
+
+teardown
+{
+ DROP TABLE t;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rx1 { SELECT * FROM t WHERE id = 1000000; }
+# delay until after T3 commits
+step wz1 { UPDATE t SET txt = 'a' WHERE id = 1; }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wx2 { UPDATE t SET txt = 'b' WHERE id = 1000000; }
+step c2 { COMMIT; }
+
+session s3
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wx3 { UPDATE t SET txt = 'c' WHERE id = 1000000; }
+step ry3 { SELECT * FROM t WHERE id = 500000; }
+# delay until after T4 commits
+step c3 { COMMIT; }
+
+session s4
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wy4 { UPDATE t SET txt = 'd' WHERE id = 500000; }
+step rz4 { SELECT * FROM t WHERE id = 1; }
+step c4 { COMMIT; }
+
+permutation rx1 wx2 c2 wx3 ry3 wy4 rz4 c4 c3 wz1 c1
diff --git a/src/test/isolation/specs/multixact-no-deadlock.spec b/src/test/isolation/specs/multixact-no-deadlock.spec
new file mode 100644
index 0000000..a8af724
--- /dev/null
+++ b/src/test/isolation/specs/multixact-no-deadlock.spec
@@ -0,0 +1,35 @@
+# If we already hold a lock of a given strength, do not deadlock when
+# some other transaction is waiting for a conflicting lock and we try
+# to acquire the same lock we already hold.
+setup
+{
+ CREATE TABLE justthis (
+ value int
+ );
+
+ INSERT INTO justthis VALUES (1);
+}
+
+teardown
+{
+ DROP TABLE justthis;
+}
+
+session s1
+setup { BEGIN; }
+step s1lock { SELECT * FROM justthis FOR SHARE; }
+step s1svpt { SAVEPOINT foo; }
+step s1lock2 { SELECT * FROM justthis FOR SHARE; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2lock { SELECT * FROM justthis FOR SHARE; } # ensure it's a multi
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN; }
+step s3lock { SELECT * FROM justthis FOR UPDATE; }
+step s3c { COMMIT; }
+
+permutation s1lock s2lock s1svpt s3lock s1lock2 s2c s1c s3c
diff --git a/src/test/isolation/specs/multixact-no-forget.spec b/src/test/isolation/specs/multixact-no-forget.spec
new file mode 100644
index 0000000..7f8a38b
--- /dev/null
+++ b/src/test/isolation/specs/multixact-no-forget.spec
@@ -0,0 +1,44 @@
+# If transaction A holds a lock, and transaction B does an update,
+# make sure we don't forget the lock if B aborts.
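+#
+# A minimal sketch of the invariant under test, assuming the dont_forget
+# table created in the setup below (plain SQL, not spec syntax):
+#
+#     -- session 1:  BEGIN; SELECT * FROM dont_forget FOR KEY SHARE;
+#     -- session 2:  BEGIN; UPDATE dont_forget SET value = 2; ROLLBACK;
+#     -- session 3:  SELECT * FROM dont_forget FOR UPDATE;
+#     --             must block until session 1 commits: session 1's KEY SHARE
+#     --             lock has to survive session 2's aborted update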
+setup
+{
+ CREATE TABLE dont_forget (
+ value int
+ );
+
+ INSERT INTO dont_forget VALUES (1);
+}
+
+teardown
+{
+ DROP TABLE dont_forget;
+}
+
+session s1
+setup { BEGIN; }
+step s1_show { SELECT current_setting('default_transaction_isolation') <> 'read committed'; }
+step s1_lock { SELECT * FROM dont_forget FOR KEY SHARE; }
+step s1_commit { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2_update { UPDATE dont_forget SET value = 2; }
+step s2_abort { ROLLBACK; }
+step s2_commit { COMMIT; }
+
+session s3
+# try cases with both a non-conflicting lock with s1's and a conflicting one
+step s3_forkeyshr { SELECT * FROM dont_forget FOR KEY SHARE; }
+step s3_fornokeyupd { SELECT * FROM dont_forget FOR NO KEY UPDATE; }
+step s3_forupd { SELECT * FROM dont_forget FOR UPDATE; }
+
+permutation s1_show s1_commit s2_commit
+permutation s1_lock s2_update s2_abort s3_forkeyshr s1_commit
+permutation s1_lock s2_update s2_commit s3_forkeyshr s1_commit
+permutation s1_lock s2_update s1_commit s3_forkeyshr s2_commit
+permutation s1_lock s2_update s2_abort s3_fornokeyupd s1_commit
+permutation s1_lock s2_update s2_commit s3_fornokeyupd s1_commit
+permutation s1_lock s2_update s1_commit s3_fornokeyupd s2_commit
+permutation s1_lock s2_update s2_abort s3_forupd s1_commit
+permutation s1_lock s2_update s2_commit s3_forupd s1_commit
+permutation s1_lock s2_update s1_commit s3_forupd s2_commit
diff --git a/src/test/isolation/specs/nowait-2.spec b/src/test/isolation/specs/nowait-2.spec
new file mode 100644
index 0000000..cf892f2
--- /dev/null
+++ b/src/test/isolation/specs/nowait-2.spec
@@ -0,0 +1,37 @@
+# Test NOWAIT with multixact locks.
+
+setup
+{
+ CREATE TABLE foo (
+ id int PRIMARY KEY,
+ data text NOT NULL
+ );
+ INSERT INTO foo VALUES (1, 'x');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM foo FOR SHARE NOWAIT; }
+step s1b { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2a { SELECT * FROM foo FOR SHARE NOWAIT; }
+step s2b { SELECT * FROM foo FOR UPDATE NOWAIT; }
+step s2c { COMMIT; }
+
+# s1 and s2 both get SHARE lock, creating a multixact lock, then s2
+# tries to upgrade to UPDATE but aborts because it cannot acquire a
+# multi-xact lock
+permutation s1a s2a s2b s1b s2c
+# the same but with the SHARE locks acquired in a different order, so
+# s2 again aborts because it can't acquire a multi-xact lock
+permutation s2a s1a s2b s1b s2c
+# s2 acquires SHARE and then UPDATE; s1 then tries to acquire SHARE but
+# aborts because it can't acquire a regular row lock
+permutation s2a s2b s1a s1b s2c
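+
+# For reference, the aborting steps above fail immediately instead of
+# waiting; the error looks like (assuming the foo table from the setup):
+#
+#     SELECT * FROM foo FOR UPDATE NOWAIT;
+#     ERROR:  could not obtain lock on row in relation "foo"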
diff --git a/src/test/isolation/specs/nowait-3.spec b/src/test/isolation/specs/nowait-3.spec
new file mode 100644
index 0000000..06fb762
--- /dev/null
+++ b/src/test/isolation/specs/nowait-3.spec
@@ -0,0 +1,33 @@
+# Test NOWAIT with tuple locks.
+
+setup
+{
+ CREATE TABLE foo (
+ id int PRIMARY KEY,
+ data text NOT NULL
+ );
+ INSERT INTO foo VALUES (1, 'x');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM foo FOR UPDATE; }
+step s1b { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2a { SELECT * FROM foo FOR UPDATE; }
+step s2b { COMMIT; }
+
+session s3
+setup { BEGIN; }
+step s3a { SELECT * FROM foo FOR UPDATE NOWAIT; }
+step s3b { COMMIT; }
+
+# s3 reports an error immediately due to the tuple lock held by s2 (which
+# is next in line for the row lock held by s1)
+permutation s1a s2a s3a s1b s2b s3b
diff --git a/src/test/isolation/specs/nowait-4.spec b/src/test/isolation/specs/nowait-4.spec
new file mode 100644
index 0000000..da80330
--- /dev/null
+++ b/src/test/isolation/specs/nowait-4.spec
@@ -0,0 +1,35 @@
+# Test NOWAIT with an updated tuple chain.
+
+setup
+{
+ CREATE TABLE foo (
+ id int PRIMARY KEY,
+ data text NOT NULL
+ );
+ INSERT INTO foo VALUES (1, 'x');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM foo WHERE pg_advisory_lock(0) IS NOT NULL FOR UPDATE NOWAIT; }
+step s1b { COMMIT; }
+
+session s2
+step s2a { SELECT pg_advisory_lock(0); }
+step s2b { UPDATE foo SET data = data; }
+step s2c { BEGIN; }
+step s2d { UPDATE foo SET data = data; }
+step s2e { SELECT pg_advisory_unlock(0); }
+step s2f { COMMIT; }
+
+# s1 takes a snapshot but then waits on an advisory lock. s2 then updates
+# the row in one transaction, and again in a second transaction that it
+# leaves uncommitted, before allowing s1 to proceed to try to lock the row.
+# Because s1's snapshot sees the older row version, we reach the waiting
+# code in EvalPlanQualFetch, which ereports when in NOWAIT mode.
+permutation s2a s1a s2b s2c s2d s2e s1b s2f
diff --git a/src/test/isolation/specs/nowait-5.spec b/src/test/isolation/specs/nowait-5.spec
new file mode 100644
index 0000000..46108de
--- /dev/null
+++ b/src/test/isolation/specs/nowait-5.spec
@@ -0,0 +1,57 @@
+# Test NOWAIT on an updated tuple chain
+
+setup
+{
+
+ DROP TABLE IF EXISTS test_nowait;
+ CREATE TABLE test_nowait (
+ id integer PRIMARY KEY,
+ value integer not null
+ );
+
+ INSERT INTO test_nowait
+ SELECT x,x FROM generate_series(1,2) x;
+}
+
+teardown
+{
+ DROP TABLE test_nowait;
+}
+
+session sl1
+step sl1_prep {
+ PREPARE sl1_run AS SELECT id FROM test_nowait WHERE pg_advisory_lock(0) is not null FOR UPDATE NOWAIT;
+}
+step sl1_exec {
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+ EXECUTE sl1_run;
+ SELECT xmin, xmax, ctid, * FROM test_nowait;
+}
+teardown { COMMIT; }
+
+# A session that's used for an UPDATE of the rows to be locked, for when we're testing ctid
+# chain following.
+session upd
+step upd_getlock {
+ SELECT pg_advisory_lock(0);
+}
+step upd_doupdate {
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+ UPDATE test_nowait SET value = value WHERE id % 2 = 0;
+ COMMIT;
+}
+step upd_releaselock {
+ SELECT pg_advisory_unlock(0);
+}
+
+# A session that acquires locks that sl1 is supposed to avoid blocking on
+session lk1
+step lk1_doforshare {
+ BEGIN ISOLATION LEVEL READ COMMITTED;
+ SELECT id FROM test_nowait WHERE id % 2 = 0 FOR SHARE;
+}
+teardown {
+ COMMIT;
+}
+
+permutation sl1_prep upd_getlock sl1_exec upd_doupdate lk1_doforshare upd_releaselock
diff --git a/src/test/isolation/specs/nowait.spec b/src/test/isolation/specs/nowait.spec
new file mode 100644
index 0000000..a75e54c
--- /dev/null
+++ b/src/test/isolation/specs/nowait.spec
@@ -0,0 +1,25 @@
+# Test NOWAIT when regular row locks can't be acquired.
+
+setup
+{
+ CREATE TABLE foo (
+ id int PRIMARY KEY,
+ data text NOT NULL
+ );
+ INSERT INTO foo VALUES (1, 'x');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM foo FOR UPDATE NOWAIT; }
+step s1b { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2a { SELECT * FROM foo FOR UPDATE NOWAIT; }
+step s2b { COMMIT; }
diff --git a/src/test/isolation/specs/partial-index.spec b/src/test/isolation/specs/partial-index.spec
new file mode 100644
index 0000000..c033841
--- /dev/null
+++ b/src/test/isolation/specs/partial-index.spec
@@ -0,0 +1,32 @@
+# Partial Index test
+#
+# Make sure that an update which moves a row out of a partial index
+# is handled correctly. In early versions, an attempt at optimization
+# broke this behavior, allowing anomalies.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ create table test_t (id integer, val1 text, val2 integer);
+ create index test_idx on test_t(id) where val2 = 1;
+ insert into test_t (select generate_series(0, 10000), 'a', 2);
+ insert into test_t (select generate_series(0, 10), 'a', 1);
+}
+
+teardown
+{
+ DROP TABLE test_t;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rxy1 { select * from test_t where val2 = 1; }
+step wx1 { update test_t set val2 = 2 where val2 = 1 and id = 10; }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wy2 { update test_t set val2 = 2 where val2 = 1 and id = 9; }
+step rxy2 { select * from test_t where val2 = 1; }
+step c2 { COMMIT; }
diff --git a/src/test/isolation/specs/partition-concurrent-attach.spec b/src/test/isolation/specs/partition-concurrent-attach.spec
new file mode 100644
index 0000000..fcd4dce
--- /dev/null
+++ b/src/test/isolation/specs/partition-concurrent-attach.spec
@@ -0,0 +1,43 @@
+# Verify that the default partition's constraint is enforced correctly
+# when partitions are added concurrently to its parent
+setup {
+ drop table if exists tpart;
+ create table tpart(i int, j text) partition by range(i);
+ create table tpart_1(like tpart);
+ create table tpart_2(like tpart);
+ create table tpart_default (a int, j text, i int) partition by list (j);
+ create table tpart_default_default (a int, i int, b int, j text);
+ alter table tpart_default_default drop b;
+ alter table tpart_default attach partition tpart_default_default default;
+ alter table tpart_default drop a;
+ alter table tpart attach partition tpart_default default;
+ alter table tpart attach partition tpart_1 for values from(0) to (100);
+ insert into tpart_2 values (110,'xxx'), (120, 'yyy'), (150, 'zzz');
+}
+
+session s1
+step s1b { begin; }
+step s1a { alter table tpart attach partition tpart_2 for values from (100) to (200); }
+step s1c { commit; }
+
+session s2
+step s2b { begin; }
+step s2i { insert into tpart values (110,'xxx'), (120, 'yyy'), (150, 'zzz'); }
+step s2i2 { insert into tpart_default (i, j) values (110, 'xxx'), (120, 'yyy'), (150, 'zzz'); }
+step s2c { commit; }
+step s2s { select tableoid::regclass, * from tpart; }
+
+teardown { drop table tpart; }
+
+# The insert into tpart by s2 routes to tpart_default, because s2 does not
+# see the concurrently added tpart_2; it should fail, because tpart_2 having
+# been added changes the partition constraint of tpart_default
+permutation s1b s1a s2b s2i s1c s2c s2s
+
+# similar to above, but now insert into sub-partitioned tpart_default
+permutation s1b s1a s2b s2i2 s1c s2c s2s
+
+# reverse: now the insert into tpart_default by s2 occurs first, followed by
+# the attach in s1, which should fail when it scans the leaf default partition
+# and finds the violating rows
+permutation s1b s2b s2i s1a s2c s1c s2s
diff --git a/src/test/isolation/specs/partition-drop-index-locking.spec b/src/test/isolation/specs/partition-drop-index-locking.spec
new file mode 100644
index 0000000..34e8b52
--- /dev/null
+++ b/src/test/isolation/specs/partition-drop-index-locking.spec
@@ -0,0 +1,47 @@
+# Verify that DROP INDEX properly locks all downward sub-partitions
+# and partitions before locking the indexes.
+
+setup
+{
+ CREATE TABLE part_drop_index_locking (id int) PARTITION BY RANGE(id);
+ CREATE TABLE part_drop_index_locking_subpart PARTITION OF part_drop_index_locking FOR VALUES FROM (1) TO (100) PARTITION BY RANGE(id);
+ CREATE TABLE part_drop_index_locking_subpart_child PARTITION OF part_drop_index_locking_subpart FOR VALUES FROM (1) TO (100);
+ CREATE INDEX part_drop_index_locking_idx ON part_drop_index_locking(id);
+ CREATE INDEX part_drop_index_locking_subpart_idx ON part_drop_index_locking_subpart(id);
+}
+
+teardown
+{
+ DROP TABLE part_drop_index_locking;
+}
+
+# SELECT will take AccessShare lock first on the table and then on its index.
+# We can simulate the case where DROP INDEX starts between those steps
+# by manually taking the table lock beforehand.
+session s1
+step s1begin { BEGIN; }
+step s1lock { LOCK TABLE part_drop_index_locking_subpart_child IN ACCESS SHARE MODE; }
+step s1select { SELECT * FROM part_drop_index_locking_subpart_child; }
+step s1commit { COMMIT; }
+
+session s2
+step s2begin { BEGIN; }
+step s2drop { DROP INDEX part_drop_index_locking_idx; }
+step s2dropsub { DROP INDEX part_drop_index_locking_subpart_idx; }
+step s2commit { COMMIT; }
+
+session s3
+step s3getlocks {
+ SELECT s.query, c.relname, l.mode, l.granted
+ FROM pg_locks l
+ JOIN pg_class c ON l.relation = c.oid
+ JOIN pg_stat_activity s ON l.pid = s.pid
+ WHERE c.relname LIKE 'part_drop_index_locking%'
+ ORDER BY s.query, c.relname, l.mode, l.granted;
+}
+
+# Run DROP INDEX on the top partitioned table
+permutation s1begin s1lock s2begin s2drop(s1commit) s1select s3getlocks s1commit s3getlocks s2commit
+
+# Run DROP INDEX on the intermediate sub-partitioned table
+permutation s1begin s1lock s2begin s2dropsub(s1commit) s1select s3getlocks s1commit s3getlocks s2commit
diff --git a/src/test/isolation/specs/partition-key-update-1.spec b/src/test/isolation/specs/partition-key-update-1.spec
new file mode 100644
index 0000000..6b5f422
--- /dev/null
+++ b/src/test/isolation/specs/partition-key-update-1.spec
@@ -0,0 +1,86 @@
+# Test that an error is thrown if the target row has been moved to a
+# different partition by a concurrent session.
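+#
+# For reference, these concurrency errors are reported with messages along
+# the lines of (exact wording may differ across versions):
+#
+#     ERROR:  tuple to be updated was already moved to another partition
+#             due to concurrent update
+#
+# with "deleted" or "locked" in place of "updated" for the ExecDelete and
+# ExecLockRows cases.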
+
+setup
+{
+ --
+ -- Setup to test an error from ExecUpdate and ExecDelete.
+ --
+ CREATE TABLE foo (a int, b text) PARTITION BY LIST(a);
+ CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1);
+ CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2);
+ INSERT INTO foo VALUES (1, 'ABC');
+
+ --
+ -- Setup to test an error from GetTupleForTrigger
+ --
+ CREATE TABLE footrg (a int, b text) PARTITION BY LIST(a);
+ CREATE TABLE footrg1 PARTITION OF footrg FOR VALUES IN (1);
+ CREATE TABLE footrg2 PARTITION OF footrg FOR VALUES IN (2);
+ INSERT INTO footrg VALUES (1, 'ABC');
+ CREATE FUNCTION func_footrg_mod_a() RETURNS TRIGGER AS $$
+ BEGIN
+ NEW.a = 2; -- This is changing partition key column.
+ RETURN NEW;
+ END $$ LANGUAGE PLPGSQL;
+ CREATE TRIGGER footrg_mod_a BEFORE UPDATE ON footrg1
+ FOR EACH ROW EXECUTE PROCEDURE func_footrg_mod_a();
+
+ --
+ -- Setup to test an error from ExecLockRows
+ --
+ CREATE TABLE foo_range_parted (a int, b text) PARTITION BY RANGE(a);
+ CREATE TABLE foo_range_parted1 PARTITION OF foo_range_parted FOR VALUES FROM (1) TO (10);
+ CREATE TABLE foo_range_parted2 PARTITION OF foo_range_parted FOR VALUES FROM (10) TO (20);
+ INSERT INTO foo_range_parted VALUES(7, 'ABC');
+ CREATE UNIQUE INDEX foo_range_parted1_a_unique ON foo_range_parted1 (a);
+ CREATE TABLE bar (a int REFERENCES foo_range_parted1(a));
+}
+
+teardown
+{
+ DROP TABLE foo;
+ DROP TRIGGER footrg_mod_a ON footrg1;
+ DROP FUNCTION func_footrg_mod_a();
+ DROP TABLE footrg;
+ DROP TABLE bar, foo_range_parted;
+}
+
+session s1
+step s1b { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s1u { UPDATE foo SET a=2 WHERE a=1; }
+step s1u2 { UPDATE footrg SET b='EFG' WHERE a=1; }
+step s1u3pc { UPDATE foo_range_parted SET a=11 WHERE a=7; }
+step s1u3npc { UPDATE foo_range_parted SET b='XYZ' WHERE a=7; }
+step s1c { COMMIT; }
+step s1r { ROLLBACK; }
+
+session s2
+step s2b { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s2u { UPDATE foo SET b='EFG' WHERE a=1; }
+step s2u2 { UPDATE footrg SET b='XYZ' WHERE a=1; }
+step s2i { INSERT INTO bar VALUES(7); }
+step s2d { DELETE FROM foo WHERE a=1; }
+step s2c { COMMIT; }
+
+# Concurrency error from ExecUpdate and ExecDelete.
+permutation s1b s2b s1u s1c s2d s2c
+permutation s1b s2b s1u s2d s1c s2c
+permutation s1b s2b s1u s2u s1c s2c
+permutation s1b s2b s2d s1u s2c s1c
+
+# Concurrency error from GetTupleForTrigger
+permutation s1b s2b s1u2 s1c s2u2 s2c
+permutation s1b s2b s1u2 s2u2 s1c s2c
+permutation s1b s2b s2u2 s1u2 s2c s1c
+
+# Concurrency error from ExecLockRows
+# test waiting for moved row itself
+permutation s1b s2b s1u3pc s2i s1c s2c
+permutation s1b s2b s1u3pc s2i s1r s2c
+# test waiting for in-partition update, followed by cross-partition move
+permutation s1b s2b s1u3npc s1u3pc s2i s1c s2c
+permutation s1b s2b s1u3npc s1u3pc s2i s1r s2c
+# test waiting for in-partition update, followed by cross-partition move
+permutation s1b s2b s1u3npc s1u3pc s1u3pc s2i s1c s2c
+permutation s1b s2b s1u3npc s1u3pc s1u3pc s2i s1r s2c
diff --git a/src/test/isolation/specs/partition-key-update-2.spec b/src/test/isolation/specs/partition-key-update-2.spec
new file mode 100644
index 0000000..d4cd09b
--- /dev/null
+++ b/src/test/isolation/specs/partition-key-update-2.spec
@@ -0,0 +1,45 @@
+# Concurrent update of a partition key and INSERT...ON CONFLICT DO NOTHING test
+#
+# This test tries to expose problems with the interaction between concurrent
+# sessions during an update of the partition key and INSERT...ON CONFLICT DO
+# NOTHING on a partitioned table.
+#
+# The convention here is that session 1 moves a row from one partition to
+# another due to an update of the partition key, session 2 always ends up
+# inserting, and session 3 always ends up doing nothing.
+#
+# Note: This test loosely resembles the insert-conflict-do-nothing test.
+
+setup
+{
+ CREATE TABLE foo (a int primary key, b text) PARTITION BY LIST(a);
+ CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1);
+ CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2);
+ INSERT INTO foo VALUES (1, 'initial tuple');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s1u { UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s2donothing { INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s3donothing { INSERT INTO foo VALUES(2, 'session-3 donothing') ON CONFLICT DO NOTHING; }
+step s3select { SELECT * FROM foo ORDER BY a; }
+step s3c { COMMIT; }
+
+# Regular case where one session block-waits on another to determine if it
+# should proceed with an insert or do nothing.
+permutation s1u s2donothing s3donothing s1c s2c s3select s3c
+permutation s2donothing s1u s3donothing s1c s2c s3select s3c
diff --git a/src/test/isolation/specs/partition-key-update-3.spec b/src/test/isolation/specs/partition-key-update-3.spec
new file mode 100644
index 0000000..d2883e3
--- /dev/null
+++ b/src/test/isolation/specs/partition-key-update-3.spec
@@ -0,0 +1,44 @@
+# Concurrent update of a partition key and INSERT...ON CONFLICT DO NOTHING
+# test on a partitioned table with multiple rows at higher isolation levels.
+#
+# Note: This test resembles the insert-conflict-do-nothing-2 test
+
+setup
+{
+ CREATE TABLE foo (a int primary key, b text) PARTITION BY LIST(a);
+ CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1);
+ CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2);
+ INSERT INTO foo VALUES (1, 'initial tuple');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s1u { UPDATE foo SET a=2, b=b || ' -> moved by session-1' WHERE a=1; }
+step s1c { COMMIT; }
+
+session s2
+step s2beginrr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s2begins { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step s2donothing { INSERT INTO foo VALUES(1, 'session-2 donothing') ON CONFLICT DO NOTHING; }
+step s2c { COMMIT; }
+step s2select { SELECT * FROM foo ORDER BY a; }
+
+session s3
+step s3beginrr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s3begins { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step s3donothing { INSERT INTO foo VALUES(2, 'session-3 donothing'), (2, 'session-3 donothing2') ON CONFLICT DO NOTHING; }
+step s3c { COMMIT; }
+
+permutation s2beginrr s3beginrr s1u s2donothing s1c s2c s3donothing s3c s2select
+permutation s2beginrr s3beginrr s1u s3donothing s1c s3c s2donothing s2c s2select
+permutation s2beginrr s3beginrr s1u s2donothing s3donothing s1c s2c s3c s2select
+permutation s2beginrr s3beginrr s1u s3donothing s2donothing s1c s3c s2c s2select
+permutation s2begins s3begins s1u s2donothing s1c s2c s3donothing s3c s2select
+permutation s2begins s3begins s1u s3donothing s1c s3c s2donothing s2c s2select
+permutation s2begins s3begins s1u s2donothing s3donothing s1c s2c s3c s2select
+permutation s2begins s3begins s1u s3donothing s2donothing s1c s3c s2c s2select
diff --git a/src/test/isolation/specs/partition-key-update-4.spec b/src/test/isolation/specs/partition-key-update-4.spec
new file mode 100644
index 0000000..6c70816
--- /dev/null
+++ b/src/test/isolation/specs/partition-key-update-4.spec
@@ -0,0 +1,76 @@
+# Test that a row that ends up in a new partition contains changes made by
+# a concurrent transaction.
+
+setup
+{
+ --
+ -- Setup to test concurrent handling of ExecDelete().
+ --
+ CREATE TABLE foo (a int, b text) PARTITION BY LIST(a);
+ CREATE TABLE foo1 PARTITION OF foo FOR VALUES IN (1);
+ CREATE TABLE foo2 PARTITION OF foo FOR VALUES IN (2);
+ INSERT INTO foo VALUES (1, 'ABC');
+
+ --
+ -- Setup to test concurrent handling of GetTupleForTrigger().
+ --
+ CREATE TABLE footrg (a int, b text) PARTITION BY LIST(a);
+ CREATE TABLE triglog as select * from footrg;
+ CREATE TABLE footrg1 PARTITION OF footrg FOR VALUES IN (1);
+ CREATE TABLE footrg2 PARTITION OF footrg FOR VALUES IN (2);
+ INSERT INTO footrg VALUES (1, 'ABC');
+ CREATE FUNCTION func_footrg() RETURNS TRIGGER AS $$
+ BEGIN
+ OLD.b = OLD.b || ' trigger';
+
+ -- This will verify that the trigger is not run *before* the row is
+ -- refetched by EvalPlanQual. The OLD row should contain the changes made
+ -- by the concurrent session.
+ INSERT INTO triglog select OLD.*;
+
+ RETURN OLD;
+ END $$ LANGUAGE PLPGSQL;
+ CREATE TRIGGER footrg_ondel BEFORE DELETE ON footrg1
+ FOR EACH ROW EXECUTE PROCEDURE func_footrg();
+
+}
+
+teardown
+{
+ DROP TABLE foo;
+ DROP TRIGGER footrg_ondel ON footrg1;
+ DROP FUNCTION func_footrg();
+ DROP TABLE footrg;
+ DROP TABLE triglog;
+}
+
+session s1
+step s1b { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s1u { UPDATE foo SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; }
+step s1ut { UPDATE footrg SET a = a + 1, b = b || ' update1' WHERE b like '%ABC%'; }
+step s1s { SELECT tableoid::regclass, * FROM foo ORDER BY a; }
+step s1st { SELECT tableoid::regclass, * FROM footrg ORDER BY a; }
+step s1stl { SELECT * FROM triglog ORDER BY a; }
+step s1c { COMMIT; }
+
+session s2
+step s2b { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step s2u1 { UPDATE foo SET b = b || ' update2' WHERE a = 1; }
+step s2u2 { UPDATE foo SET b = 'EFG' WHERE a = 1; }
+step s2ut1 { UPDATE footrg SET b = b || ' update2' WHERE a = 1; }
+step s2ut2 { UPDATE footrg SET b = 'EFG' WHERE a = 1; }
+step s2c { COMMIT; }
+
+
+# Session s1 is moving a row into another partition, but is waiting for
+# another session s2 that is updating the original row. The row that ends up
+# in the new partition should contain the changes made by session s2.
+permutation s1b s2b s2u1 s1u s2c s1c s1s
+
+# Same as above, except, session s1 is waiting in GetTupleForTrigger().
+permutation s1b s2b s2ut1 s1ut s2c s1c s1st s1stl
+
+# The two cases below are similar to the above two, except that session s1
+# fails the EvalPlanQual() test, so the partition key update does not happen.
+permutation s1b s2b s2u2 s1u s2c s1c s1s
+permutation s1b s2b s2ut2 s1ut s2c s1c s1st s1stl
diff --git a/src/test/isolation/specs/plpgsql-toast.spec b/src/test/isolation/specs/plpgsql-toast.spec
new file mode 100644
index 0000000..bb444fc
--- /dev/null
+++ b/src/test/isolation/specs/plpgsql-toast.spec
@@ -0,0 +1,178 @@
+# Test TOAST behavior in PL/pgSQL procedures with transaction control.
+#
+# We need to ensure that values stored in PL/pgSQL variables are free
+# of external TOAST references, because those could disappear after a
+# transaction is committed (leading to errors "missing chunk number
+# ... for toast value ..."). The tests here do this by running VACUUM
+# in a second session. Advisory locks are used to have the VACUUM
+# kick in at the right time. The different "assign" steps test
+# different code paths for variable assignments in PL/pgSQL.
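+#
+# A rough sketch of the failure mode being guarded against, outside the
+# isolation tester (timing dependent, assuming the test1 table from the
+# setup below):
+#
+#     do $$
+#     declare x text;
+#     begin
+#       select b into x from test1;  -- x may hold an out-of-line TOAST pointer
+#       delete from test1;
+#       commit;                      -- a concurrent VACUUM may now reclaim the
+#                                    -- TOAST data that pointer references
+#       raise notice '%', length(x); -- would fail with "missing chunk number
+#                                    -- ... for toast value ..." if x had not
+#                                    -- been detoasted before the commit
+#     end $$;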
+
+setup
+{
+ CREATE TABLE test1 (a int, b text);
+ ALTER TABLE test1 ALTER COLUMN b SET STORAGE EXTERNAL;
+ INSERT INTO test1 VALUES (1, repeat('foo', 2000));
+ CREATE TYPE test2 AS (a bigint, b text);
+}
+
+teardown
+{
+ DROP TABLE test1;
+ DROP TYPE test2;
+}
+
+session s1
+
+setup
+{
+ SELECT pg_advisory_unlock_all();
+}
+
+# assign_simple_var()
+step assign1
+{
+do $$
+ declare
+ x text;
+ begin
+ select test1.b into x from test1;
+ delete from test1;
+ commit;
+ perform pg_advisory_lock(1);
+ raise notice 'length(x) = %', length(x);
+ end;
+$$;
+}
+
+# assign_simple_var()
+step assign2
+{
+do $$
+ declare
+ x text;
+ begin
+ x := (select test1.b from test1);
+ delete from test1;
+ commit;
+ perform pg_advisory_lock(1);
+ raise notice 'length(x) = %', length(x);
+ end;
+$$;
+}
+
+# expanded_record_set_field()
+step assign3
+{
+do $$
+ declare
+ r record;
+ begin
+ select * into r from test1;
+ r.b := (select test1.b from test1);
+ delete from test1;
+ commit;
+ perform pg_advisory_lock(1);
+ raise notice 'length(r) = %', length(r::text);
+ end;
+$$;
+}
+
+# expanded_record_set_fields()
+step assign4
+{
+do $$
+ declare
+ r test2;
+ begin
+ select * into r from test1;
+ delete from test1;
+ commit;
+ perform pg_advisory_lock(1);
+ raise notice 'length(r) = %', length(r::text);
+ end;
+$$;
+}
+
+# expanded_record_set_tuple()
+step assign5
+{
+do $$
+ declare
+ r record;
+ begin
+ for r in select test1.b from test1 loop
+ null;
+ end loop;
+ delete from test1;
+ commit;
+ perform pg_advisory_lock(1);
+ raise notice 'length(r) = %', length(r::text);
+ end;
+$$;
+}
+
+# FOR loop must not hold any fetched-but-not-detoasted values across commit
+step assign6
+{
+do $$
+ declare
+ r record;
+ begin
+ insert into test1 values (2, repeat('bar', 3000));
+ insert into test1 values (3, repeat('baz', 4000));
+ for r in select test1.b from test1 loop
+ delete from test1;
+ commit;
+ perform pg_advisory_lock(1);
+ raise notice 'length(r) = %', length(r::text);
+ end loop;
+ end;
+$$;
+}
+
+# Check that the results of a query can be detoasted just after committing
+# (there's no interaction with VACUUM here)
+step "fetch-after-commit"
+{
+do $$
+ declare
+ r record;
+ t text;
+ begin
+ insert into test1 values (2, repeat('bar', 3000));
+ insert into test1 values (3, repeat('baz', 4000));
+ for r in select test1.a from test1 loop
+ commit;
+ select b into t from test1 where a = r.a;
+ raise notice 'length(t) = %', length(t);
+ end loop;
+ end;
+$$;
+}
+
+session s2
+setup
+{
+ SELECT pg_advisory_unlock_all();
+}
+step lock
+{
+ SELECT pg_advisory_lock(1);
+}
+step vacuum
+{
+ VACUUM test1;
+}
+step unlock
+{
+ SELECT pg_advisory_unlock(1);
+}
+
+permutation lock assign1 vacuum unlock
+permutation lock assign2 vacuum unlock
+permutation lock assign3 vacuum unlock
+permutation lock assign4 vacuum unlock
+permutation lock assign5 vacuum unlock
+permutation lock assign6 vacuum unlock
+permutation "fetch-after-commit"
diff --git a/src/test/isolation/specs/predicate-gin.spec b/src/test/isolation/specs/predicate-gin.spec
new file mode 100644
index 0000000..e279eaa
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gin.spec
@@ -0,0 +1,115 @@
+# Test for page level predicate locking in gin index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in
+# such a way that an index scan (from one transaction) and an index insert
+# (from another transaction) try to access the same part (sub-tree) of the
+# index, whereas to check reduced false positives, they try to access
+# different parts (sub-trees) of the index.
+
+
+setup
+{
+ create table gin_tbl(p int4[]);
+ insert into gin_tbl select array[1] from generate_series(1, 8192) g;
+ insert into gin_tbl select array[g] from generate_series(2, 800) g;
+ create index ginidx on gin_tbl using gin(p) with (fastupdate = off);
+ create table other_tbl(v int4);
+}
+
+teardown
+{
+ drop table gin_tbl;
+ drop table other_tbl;
+}
+
+session s1
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+}
+
+step ra1 { select * from gin_tbl where p @> array[1] limit 1; }
+step rb1 { select count(*) from gin_tbl where p @> array[2]; }
+step rc1 { select count(*) from gin_tbl where p @> array[800]; }
+step rd1 { select count(*) from gin_tbl where p @> array[2000]; }
+
+step wo1 { insert into other_tbl values (1); }
+
+step c1 { commit; }
+
+session s2
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+}
+
+step ro2 { select count(*) from other_tbl; }
+
+step wa2 { insert into gin_tbl values (array[1]); }
+step wb2 { insert into gin_tbl values (array[2]); }
+step wc2 { insert into gin_tbl values (array[800]); }
+step wd2 { insert into gin_tbl values (array[2000]); }
+
+step c2 { commit; }
+
+session s3
+step fu { alter index ginidx set (fastupdate = on); }
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, so there is an
+# r-w conflict.
+
+permutation ra1 ro2 wo1 c1 wa2 c2
+permutation ro2 ra1 wo1 c1 wa2 c2
+permutation ro2 ra1 wo1 wa2 c1 c2
+permutation ra1 ro2 wa2 wo1 c1 c2
+
+permutation rb1 ro2 wo1 c1 wb2 c2
+permutation ro2 rb1 wo1 c1 wb2 c2
+permutation ro2 rb1 wo1 wb2 c1 c2
+permutation rb1 ro2 wb2 wo1 c1 c2
+
+permutation rc1 ro2 wo1 c1 wc2 c2
+permutation ro2 rc1 wo1 c1 wc2 c2
+permutation ro2 rc1 wo1 wc2 c1 c2
+permutation rc1 ro2 wc2 wo1 c1 c2
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index. So, there is no
+# r-w conflict.
+
+permutation ra1 ro2 wo1 c1 wb2 c2
+permutation ro2 ra1 wo1 c1 wc2 c2
+permutation ro2 rb1 wo1 wa2 c1 c2
+permutation rc1 ro2 wa2 wo1 c1 c2
+
+permutation rb1 ro2 wo1 c1 wa2 c2
+permutation ro2 rb1 wo1 c1 wc2 c2
+permutation ro2 ra1 wo1 wb2 c1 c2
+permutation rc1 ro2 wb2 wo1 c1 c2
+
+permutation rc1 ro2 wo1 c1 wa2 c2
+permutation ro2 rc1 wo1 c1 wb2 c2
+permutation ro2 ra1 wo1 wc2 c1 c2
+permutation rb1 ro2 wc2 wo1 c1 c2
+
+# With fastupdate = on, the whole index is under predicate lock, so we
+# can't distinguish particular keys.
+
+permutation fu ra1 ro2 wo1 c1 wa2 c2
+permutation fu ra1 ro2 wo1 c1 wb2 c2
+
+# Check fastupdate turned on concurrently.
+
+permutation ra1 ro2 wo1 c1 fu wa2 c2
+
+# Tests for conflicts with previously non-existing key
+
+permutation rd1 ro2 wo1 c1 wd2 c2
+permutation ro2 rd1 wo1 c1 wd2 c2
+permutation ro2 rd1 wo1 wd2 c1 c2
+permutation rd1 ro2 wd2 wo1 c1 c2
diff --git a/src/test/isolation/specs/predicate-gist.spec b/src/test/isolation/specs/predicate-gist.spec
new file mode 100644
index 0000000..9016c6e
--- /dev/null
+++ b/src/test/isolation/specs/predicate-gist.spec
@@ -0,0 +1,117 @@
+# Test for page level predicate locking in gist
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in
+# such a way that an index scan (from one transaction) and an index insert
+# (from another transaction) try to access the same part (sub-tree) of the
+# index, whereas to check reduced false positives, they try to access
+# different parts (sub-trees) of the index.
+
+setup
+{
+ create table gist_point_tbl(id int4, p point);
+ create index gist_pointidx on gist_point_tbl using gist(p);
+ insert into gist_point_tbl (id, p)
+ select g, point(g*10, g*10) from generate_series(1, 1000) g;
+}
+
+teardown
+{
+ drop table gist_point_tbl;
+}
+
+session s1
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+ set enable_bitmapscan=off;
+ set enable_indexonlyscan=on;
+}
+
+step rxy1 { select sum(p[0]) from gist_point_tbl where p << point(2500, 2500); }
+step wx1 { insert into gist_point_tbl (id, p)
+ select g, point(g*500, g*500) from generate_series(15, 20) g; }
+step rxy3 { select sum(p[0]) from gist_point_tbl where p >> point(6000,6000); }
+step wx3 { insert into gist_point_tbl (id, p)
+ select g, point(g*500, g*500) from generate_series(12, 18) g; }
+step c1 { commit; }
+
+
+session s2
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+ set enable_bitmapscan=off;
+ set enable_indexonlyscan=on;
+}
+
+step rxy2 { select sum(p[0]) from gist_point_tbl where p >> point(7500,7500); }
+step wy2 { insert into gist_point_tbl (id, p)
+ select g, point(g*500, g*500) from generate_series(1, 5) g; }
+step rxy4 { select sum(p[0]) from gist_point_tbl where p << point(1000,1000); }
+step wy4 { insert into gist_point_tbl (id, p)
+ select g, point(g*50, g*50) from generate_series(1, 20) g; }
+step c2 { commit; }
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, but one transaction
+# commits before the other begins, so there is no r-w conflict.
+
+permutation rxy1 wx1 c1 rxy2 wy2 c2
+permutation rxy2 wy2 c2 rxy1 wx1 c1
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index, and one transaction
+# also commits before the other begins, so there is no r-w conflict.
+
+permutation rxy3 wx3 c1 rxy4 wy4 c2
+permutation rxy4 wy4 c2 rxy3 wx3 c1
+
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same part of the index, and one transaction
+# begins before the other commits, so there is an r-w conflict.
+
+permutation rxy1 wx1 rxy2 c1 wy2 c2
+permutation rxy1 wx1 rxy2 wy2 c1 c2
+permutation rxy1 wx1 rxy2 wy2 c2 c1
+permutation rxy1 rxy2 wx1 c1 wy2 c2
+permutation rxy1 rxy2 wx1 wy2 c1 c2
+permutation rxy1 rxy2 wx1 wy2 c2 c1
+permutation rxy1 rxy2 wy2 wx1 c1 c2
+permutation rxy1 rxy2 wy2 wx1 c2 c1
+permutation rxy1 rxy2 wy2 c2 wx1 c1
+permutation rxy2 rxy1 wx1 c1 wy2 c2
+permutation rxy2 rxy1 wx1 wy2 c1 c2
+permutation rxy2 rxy1 wx1 wy2 c2 c1
+permutation rxy2 rxy1 wy2 wx1 c1 c2
+permutation rxy2 rxy1 wy2 wx1 c2 c1
+permutation rxy2 rxy1 wy2 c2 wx1 c1
+permutation rxy2 wy2 rxy1 wx1 c1 c2
+permutation rxy2 wy2 rxy1 wx1 c2 c1
+permutation rxy2 wy2 rxy1 c2 wx1 c1
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different parts of the index, so no r-w conflict.
+
+permutation rxy3 wx3 rxy4 c1 wy4 c2
+permutation rxy3 wx3 rxy4 wy4 c1 c2
+permutation rxy3 wx3 rxy4 wy4 c2 c1
+permutation rxy3 rxy4 wx3 c1 wy4 c2
+permutation rxy3 rxy4 wx3 wy4 c1 c2
+permutation rxy3 rxy4 wx3 wy4 c2 c1
+permutation rxy3 rxy4 wy4 wx3 c1 c2
+permutation rxy3 rxy4 wy4 wx3 c2 c1
+permutation rxy3 rxy4 wy4 c2 wx3 c1
+permutation rxy4 rxy3 wx3 c1 wy4 c2
+permutation rxy4 rxy3 wx3 wy4 c1 c2
+permutation rxy4 rxy3 wx3 wy4 c2 c1
+permutation rxy4 rxy3 wy4 wx3 c1 c2
+permutation rxy4 rxy3 wy4 wx3 c2 c1
+permutation rxy4 rxy3 wy4 c2 wx3 c1
+permutation rxy4 wy4 rxy3 wx3 c1 c2
+permutation rxy4 wy4 rxy3 wx3 c2 c1
+permutation rxy4 wy4 rxy3 c2 wx3 c1
diff --git a/src/test/isolation/specs/predicate-hash.spec b/src/test/isolation/specs/predicate-hash.spec
new file mode 100644
index 0000000..7ca193b
--- /dev/null
+++ b/src/test/isolation/specs/predicate-hash.spec
@@ -0,0 +1,122 @@
+# Test for page level predicate locking in hash index
+#
+# Test to verify serialization failures and to check reduced false positives
+#
+# To verify serialization failures, queries and permutations are written in
+# such a way that an index scan (from one transaction) and an index insert
+# (from another transaction) try to access the same bucket of the index,
+# whereas to check reduced false positives, they try to access different
+# buckets of the index.
+
+setup
+{
+ create table hash_tbl(id int4, p integer);
+ create index hash_idx on hash_tbl using hash(p);
+ insert into hash_tbl (id, p)
+ select g, 10 from generate_series(1, 10) g;
+ insert into hash_tbl (id, p)
+ select g, 20 from generate_series(11, 20) g;
+ insert into hash_tbl (id, p)
+ select g, 30 from generate_series(21, 30) g;
+ insert into hash_tbl (id, p)
+ select g, 40 from generate_series(31, 40) g;
+}
+
+teardown
+{
+ drop table hash_tbl;
+}
+
+session s1
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+ set enable_bitmapscan=off;
+ set enable_indexonlyscan=on;
+}
+step rxy1 { select sum(p) from hash_tbl where p=20; }
+step wx1 { insert into hash_tbl (id, p)
+ select g, 30 from generate_series(41, 50) g; }
+step rxy3 { select sum(p) from hash_tbl where p=20; }
+step wx3 { insert into hash_tbl (id, p)
+ select g, 50 from generate_series(41, 50) g; }
+step c1 { commit; }
+
+
+session s2
+setup
+{
+ begin isolation level serializable;
+ set enable_seqscan=off;
+ set enable_bitmapscan=off;
+ set enable_indexonlyscan=on;
+}
+step rxy2 { select sum(p) from hash_tbl where p=30; }
+step wy2 { insert into hash_tbl (id, p)
+ select g, 20 from generate_series(51, 60) g; }
+step rxy4 { select sum(p) from hash_tbl where p=30; }
+step wy4 { insert into hash_tbl (id, p)
+ select g, 60 from generate_series(51, 60) g; }
+step c2 { commit; }
+
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same bucket of the index, but one transaction
+# commits before the other begins, so there is no r-w conflict.
+
+permutation rxy1 wx1 c1 rxy2 wy2 c2
+permutation rxy2 wy2 c2 rxy1 wx1 c1
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different buckets of the index, and one
+# transaction also commits before the other begins, so there is no r-w conflict.
+
+permutation rxy3 wx3 c1 rxy4 wy4 c2
+permutation rxy4 wy4 c2 rxy3 wx3 c1
+
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access the same bucket of the index, and one transaction
+# begins before the other commits, so there is an r-w conflict.
+
+permutation rxy1 wx1 rxy2 c1 wy2 c2
+permutation rxy1 wx1 rxy2 wy2 c1 c2
+permutation rxy1 wx1 rxy2 wy2 c2 c1
+permutation rxy1 rxy2 wx1 c1 wy2 c2
+permutation rxy1 rxy2 wx1 wy2 c1 c2
+permutation rxy1 rxy2 wx1 wy2 c2 c1
+permutation rxy1 rxy2 wy2 wx1 c1 c2
+permutation rxy1 rxy2 wy2 wx1 c2 c1
+permutation rxy1 rxy2 wy2 c2 wx1 c1
+permutation rxy2 rxy1 wx1 c1 wy2 c2
+permutation rxy2 rxy1 wx1 wy2 c1 c2
+permutation rxy2 rxy1 wx1 wy2 c2 c1
+permutation rxy2 rxy1 wy2 wx1 c1 c2
+permutation rxy2 rxy1 wy2 wx1 c2 c1
+permutation rxy2 rxy1 wy2 c2 wx1 c1
+permutation rxy2 wy2 rxy1 wx1 c1 c2
+permutation rxy2 wy2 rxy1 wx1 c2 c1
+permutation rxy2 wy2 rxy1 c2 wx1 c1
+
+# An index scan (from one transaction) and an index insert (from another
+# transaction) try to access different buckets of the index, so no r-w conflict.
+
+permutation rxy3 wx3 rxy4 c1 wy4 c2
+permutation rxy3 wx3 rxy4 wy4 c1 c2
+permutation rxy3 wx3 rxy4 wy4 c2 c1
+permutation rxy3 rxy4 wx3 c1 wy4 c2
+permutation rxy3 rxy4 wx3 wy4 c1 c2
+permutation rxy3 rxy4 wx3 wy4 c2 c1
+permutation rxy3 rxy4 wy4 wx3 c1 c2
+permutation rxy3 rxy4 wy4 wx3 c2 c1
+permutation rxy3 rxy4 wy4 c2 wx3 c1
+permutation rxy4 rxy3 wx3 c1 wy4 c2
+permutation rxy4 rxy3 wx3 wy4 c1 c2
+permutation rxy4 rxy3 wx3 wy4 c2 c1
+permutation rxy4 rxy3 wy4 wx3 c1 c2
+permutation rxy4 rxy3 wy4 wx3 c2 c1
+permutation rxy4 rxy3 wy4 c2 wx3 c1
+permutation rxy4 wy4 rxy3 wx3 c1 c2
+permutation rxy4 wy4 rxy3 wx3 c2 c1
+permutation rxy4 wy4 rxy3 c2 wx3 c1
diff --git a/src/test/isolation/specs/predicate-lock-hot-tuple.spec b/src/test/isolation/specs/predicate-lock-hot-tuple.spec
new file mode 100644
index 0000000..5b8aecc
--- /dev/null
+++ b/src/test/isolation/specs/predicate-lock-hot-tuple.spec
@@ -0,0 +1,37 @@
+# Test predicate locks on HOT updated tuples.
+#
+# This test has two serializable transactions. Both select two rows
+# from the table, and then update one of them.
+# If these were serialized (run one at a time), the transaction that
+# runs later would see the update made by the other one.
+#
+# Any overlap between the transactions must cause a serialization failure.
+# We used to have a bug in predicate locking of HOT-updated tuples, which
+# caused the conflict to be missed when the row had been HOT updated.
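+#
+# For reference, whether the setup UPDATE really took the HOT path can be
+# checked outside the spec via the statistics views, e.g.:
+#
+#     SELECT n_tup_upd, n_tup_hot_upd
+#     FROM pg_stat_user_tables
+#     WHERE relname = 'test';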
+
+setup
+{
+ CREATE TABLE test (i int PRIMARY KEY, t text);
+ INSERT INTO test VALUES (5, 'apple'), (7, 'pear'), (11, 'banana');
+ -- HOT-update 'pear' row.
+ UPDATE test SET t = 'pear_hot_updated' WHERE i = 7;
+}
+
+teardown
+{
+ DROP TABLE test;
+}
+
+session s1
+step b1 { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r1 { SELECT * FROM test WHERE i IN (5, 7) }
+step w1 { UPDATE test SET t = 'pear_xact1' WHERE i = 7 }
+step c1 { COMMIT; }
+
+session s2
+step b2 { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r2 { SELECT * FROM test WHERE i IN (5, 7) }
+step w2 { UPDATE test SET t = 'apple_xact2' WHERE i = 5 }
+step c2 { COMMIT; }
+
+permutation b1 b2 r1 r2 w1 w2 c1 c2
diff --git a/src/test/isolation/specs/prepared-transactions-cic.spec b/src/test/isolation/specs/prepared-transactions-cic.spec
new file mode 100644
index 0000000..626b1b6
--- /dev/null
+++ b/src/test/isolation/specs/prepared-transactions-cic.spec
@@ -0,0 +1,37 @@
+# This test verifies that CREATE INDEX CONCURRENTLY interacts with prepared
+# transactions correctly.
+setup
+{
+ CREATE TABLE cic_test (a int);
+}
+
+teardown
+{
+ DROP TABLE cic_test;
+}
+
+
+# Sessions for CREATE INDEX CONCURRENTLY test
+session s1
+step w1 { BEGIN; INSERT INTO cic_test VALUES (1); }
+step p1 { PREPARE TRANSACTION 's1'; }
+step c1 { COMMIT PREPARED 's1'; }
+
+session s2
+# The isolation tester never recognizes that a lock of s1 blocks s2, because a
+# prepared transaction's locks have no pid associated. While there's a slight
+# chance of timeout while waiting for an autovacuum-held lock, that wouldn't
+# change the output. Hence, no timeout is too short.
+setup { SET lock_timeout = 10; }
+step cic2
+{
+ CREATE INDEX CONCURRENTLY on cic_test(a);
+}
+step r2
+{
+ SET enable_seqscan to off;
+ SET enable_bitmapscan to off;
+ SELECT * FROM cic_test WHERE a = 1;
+}
+
+permutation w1 p1 cic2 c1 r2
diff --git a/src/test/isolation/specs/prepared-transactions.spec b/src/test/isolation/specs/prepared-transactions.spec
new file mode 100644
index 0000000..78b9d2c
--- /dev/null
+++ b/src/test/isolation/specs/prepared-transactions.spec
@@ -0,0 +1,1507 @@
+# This test verifies that if there's a series of rw-conflicts
+# s1 ---> s2 ---> s3, with s3 committing first
+# at least one transaction will be aborted, regardless of the order in
+# which the conflicts are detected and transactions prepare and
+# commit.
+#
+#
+# Tables test2 and test3 are used to create the
+# s1 --> s2 and s2 --> s3 rw-dependencies respectively
+#
+# test1 isn't involved in the anomaly; s1 only inserts a row into it
+# so that there's an easy way to tell (by looking for that row) if s1
+# successfully committed.
+#
+# force_snapshot is used to force s2 and s3 to take their snapshot
+# immediately after BEGIN, so we can be sure the three transactions
+# overlap.
+setup
+{
+ CREATE TABLE test1 (a int);
+ CREATE TABLE test2 (b int);
+ CREATE TABLE test3 (c int);
+ CREATE TABLE force_snapshot (a int);
+}
+
+teardown
+{
+ DROP TABLE test1;
+ DROP TABLE test2;
+ DROP TABLE test3;
+ DROP TABLE force_snapshot;
+}
+
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; INSERT INTO test1 VALUES (1); }
+step r1 { SELECT * FROM test2; }
+step p1 { PREPARE TRANSACTION 's1'; }
+step c1 { COMMIT PREPARED 's1'; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SELECT * FROM force_snapshot; }
+step r2 { SELECT * FROM test3; }
+step w2 { INSERT INTO test2 VALUES (2); }
+step p2 { PREPARE TRANSACTION 's2'; }
+step c2 { COMMIT PREPARED 's2'; }
+
+session s3
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; SELECT * FROM force_snapshot; }
+step w3 { INSERT INTO test3 VALUES (3); }
+step p3 { PREPARE TRANSACTION 's3'; }
+step c3 { COMMIT PREPARED 's3'; }
+
+# When run at the end of the permutations below, this SELECT statement
+# should never return any tuples, because at least one of the three
+# transactions involved should be aborted.
+session s4
+step check { SELECT * FROM test1,test2,test3; }
+
+# We run on all permutations of the statements above subject to the
+# following constraints:
+# - each transaction's reads/writes must happen before it prepares
+# - each transaction must prepare before committing
+# - s3 must be the first of the three transactions to commit
+# ...which means that each permutation should fail.
+#
+# To bring the number of permutations down a bit, we further require
+# them to satisfy *one* of the following:
+# - r1 < w2 < r2 < w3 (T1-->T2 conflict on write, T2-->T3 on write)
+# - r1 < w2 < w3 < r2 (T1-->T2 conflict on read, T2-->T3 on write)
+# - w2 < r1 < r2 < w3 (T1-->T2 conflict on write, T2-->T3 on read)
+# - w2 < r1 < w3 < r2 (T1-->T2 conflict on read, T2-->T3 on read)
+# This eliminates some redundant combinations. For example, it doesn't
+# matter if w2 happens before w3 as long as both come before the
+# conflicting reads.
+permutation r1 r2 w2 w3 p1 p2 p3 c3 c1 c2 check
+permutation r1 r2 w2 w3 p1 p2 p3 c3 c2 c1 check
+permutation r1 r2 w2 w3 p1 p3 p2 c3 c1 c2 check
+permutation r1 r2 w2 w3 p1 p3 p2 c3 c2 c1 check
+permutation r1 r2 w2 w3 p1 p3 c3 p2 c1 c2 check
+permutation r1 r2 w2 w3 p1 p3 c3 p2 c2 c1 check
+permutation r1 r2 w2 w3 p1 p3 c3 c1 p2 c2 check
+permutation r1 r2 w2 w3 p2 p1 p3 c3 c1 c2 check
+permutation r1 r2 w2 w3 p2 p1 p3 c3 c2 c1 check
+permutation r1 r2 w2 w3 p2 p3 p1 c3 c1 c2 check
+permutation r1 r2 w2 w3 p2 p3 p1 c3 c2 c1 check
+permutation r1 r2 w2 w3 p2 p3 c3 p1 c1 c2 check
+permutation r1 r2 w2 w3 p2 p3 c3 p1 c2 c1 check
+permutation r1 r2 w2 w3 p2 p3 c3 c2 p1 c1 check
+permutation r1 r2 w2 w3 p3 p1 p2 c3 c1 c2 check
+permutation r1 r2 w2 w3 p3 p1 p2 c3 c2 c1 check
+permutation r1 r2 w2 w3 p3 p1 c3 p2 c1 c2 check
+permutation r1 r2 w2 w3 p3 p1 c3 p2 c2 c1 check
+permutation r1 r2 w2 w3 p3 p1 c3 c1 p2 c2 check
+permutation r1 r2 w2 w3 p3 p2 p1 c3 c1 c2 check
+permutation r1 r2 w2 w3 p3 p2 p1 c3 c2 c1 check
+permutation r1 r2 w2 w3 p3 p2 c3 p1 c1 c2 check
+permutation r1 r2 w2 w3 p3 p2 c3 p1 c2 c1 check
+permutation r1 r2 w2 w3 p3 p2 c3 c2 p1 c1 check
+permutation r1 r2 w2 w3 p3 c3 p1 p2 c1 c2 check
+permutation r1 r2 w2 w3 p3 c3 p1 p2 c2 c1 check
+permutation r1 r2 w2 w3 p3 c3 p1 c1 p2 c2 check
+permutation r1 r2 w2 w3 p3 c3 p2 p1 c1 c2 check
+permutation r1 r2 w2 w3 p3 c3 p2 p1 c2 c1 check
+permutation r1 r2 w2 w3 p3 c3 p2 c2 p1 c1 check
+permutation r1 r2 w2 p1 w3 p2 p3 c3 c1 c2 check
+permutation r1 r2 w2 p1 w3 p2 p3 c3 c2 c1 check
+permutation r1 r2 w2 p1 w3 p3 p2 c3 c1 c2 check
+permutation r1 r2 w2 p1 w3 p3 p2 c3 c2 c1 check
+permutation r1 r2 w2 p1 w3 p3 c3 p2 c1 c2 check
+permutation r1 r2 w2 p1 w3 p3 c3 p2 c2 c1 check
+permutation r1 r2 w2 p1 w3 p3 c3 c1 p2 c2 check
+permutation r1 r2 w2 p1 p2 w3 p3 c3 c1 c2 check
+permutation r1 r2 w2 p1 p2 w3 p3 c3 c2 c1 check
+permutation r1 r2 w2 p2 w3 p1 p3 c3 c1 c2 check
+permutation r1 r2 w2 p2 w3 p1 p3 c3 c2 c1 check
+permutation r1 r2 w2 p2 w3 p3 p1 c3 c1 c2 check
+permutation r1 r2 w2 p2 w3 p3 p1 c3 c2 c1 check
+permutation r1 r2 w2 p2 w3 p3 c3 p1 c1 c2 check
+permutation r1 r2 w2 p2 w3 p3 c3 p1 c2 c1 check
+permutation r1 r2 w2 p2 w3 p3 c3 c2 p1 c1 check
+permutation r1 r2 w2 p2 p1 w3 p3 c3 c1 c2 check
+permutation r1 r2 w2 p2 p1 w3 p3 c3 c2 c1 check
+permutation r1 r2 p1 w2 w3 p2 p3 c3 c1 c2 check
+permutation r1 r2 p1 w2 w3 p2 p3 c3 c2 c1 check
+permutation r1 r2 p1 w2 w3 p3 p2 c3 c1 c2 check
+permutation r1 r2 p1 w2 w3 p3 p2 c3 c2 c1 check
+permutation r1 r2 p1 w2 w3 p3 c3 p2 c1 c2 check
+permutation r1 r2 p1 w2 w3 p3 c3 p2 c2 c1 check
+permutation r1 r2 p1 w2 w3 p3 c3 c1 p2 c2 check
+permutation r1 r2 p1 w2 p2 w3 p3 c3 c1 c2 check
+permutation r1 r2 p1 w2 p2 w3 p3 c3 c2 c1 check
+permutation r1 w2 w3 r2 p1 p2 p3 c3 c1 c2 check
+permutation r1 w2 w3 r2 p1 p2 p3 c3 c2 c1 check
+permutation r1 w2 w3 r2 p1 p3 p2 c3 c1 c2 check
+permutation r1 w2 w3 r2 p1 p3 p2 c3 c2 c1 check
+permutation r1 w2 w3 r2 p1 p3 c3 p2 c1 c2 check
+permutation r1 w2 w3 r2 p1 p3 c3 p2 c2 c1 check
+permutation r1 w2 w3 r2 p1 p3 c3 c1 p2 c2 check
+permutation r1 w2 w3 r2 p2 p1 p3 c3 c1 c2 check
+permutation r1 w2 w3 r2 p2 p1 p3 c3 c2 c1 check
+permutation r1 w2 w3 r2 p2 p3 p1 c3 c1 c2 check
+permutation r1 w2 w3 r2 p2 p3 p1 c3 c2 c1 check
+permutation r1 w2 w3 r2 p2 p3 c3 p1 c1 c2 check
+permutation r1 w2 w3 r2 p2 p3 c3 p1 c2 c1 check
+permutation r1 w2 w3 r2 p2 p3 c3 c2 p1 c1 check
+permutation r1 w2 w3 r2 p3 p1 p2 c3 c1 c2 check
+permutation r1 w2 w3 r2 p3 p1 p2 c3 c2 c1 check
+permutation r1 w2 w3 r2 p3 p1 c3 p2 c1 c2 check
+permutation r1 w2 w3 r2 p3 p1 c3 p2 c2 c1 check
+permutation r1 w2 w3 r2 p3 p1 c3 c1 p2 c2 check
+permutation r1 w2 w3 r2 p3 p2 p1 c3 c1 c2 check
+permutation r1 w2 w3 r2 p3 p2 p1 c3 c2 c1 check
+permutation r1 w2 w3 r2 p3 p2 c3 p1 c1 c2 check
+permutation r1 w2 w3 r2 p3 p2 c3 p1 c2 c1 check
+permutation r1 w2 w3 r2 p3 p2 c3 c2 p1 c1 check
+permutation r1 w2 w3 r2 p3 c3 p1 p2 c1 c2 check
+permutation r1 w2 w3 r2 p3 c3 p1 p2 c2 c1 check
+permutation r1 w2 w3 r2 p3 c3 p1 c1 p2 c2 check
+permutation r1 w2 w3 r2 p3 c3 p2 p1 c1 c2 check
+permutation r1 w2 w3 r2 p3 c3 p2 p1 c2 c1 check
+permutation r1 w2 w3 r2 p3 c3 p2 c2 p1 c1 check
+permutation r1 w2 w3 p1 r2 p2 p3 c3 c1 c2 check
+permutation r1 w2 w3 p1 r2 p2 p3 c3 c2 c1 check
+permutation r1 w2 w3 p1 r2 p3 p2 c3 c1 c2 check
+permutation r1 w2 w3 p1 r2 p3 p2 c3 c2 c1 check
+permutation r1 w2 w3 p1 r2 p3 c3 p2 c1 c2 check
+permutation r1 w2 w3 p1 r2 p3 c3 p2 c2 c1 check
+permutation r1 w2 w3 p1 r2 p3 c3 c1 p2 c2 check
+permutation r1 w2 w3 p1 p3 r2 p2 c3 c1 c2 check
+permutation r1 w2 w3 p1 p3 r2 p2 c3 c2 c1 check
+permutation r1 w2 w3 p1 p3 r2 c3 p2 c1 c2 check
+permutation r1 w2 w3 p1 p3 r2 c3 p2 c2 c1 check
+permutation r1 w2 w3 p1 p3 r2 c3 c1 p2 c2 check
+permutation r1 w2 w3 p1 p3 c3 r2 p2 c1 c2 check
+permutation r1 w2 w3 p1 p3 c3 r2 p2 c2 c1 check
+permutation r1 w2 w3 p1 p3 c3 r2 c1 p2 c2 check
+permutation r1 w2 w3 p1 p3 c3 c1 r2 p2 c2 check
+permutation r1 w2 w3 p3 r2 p1 p2 c3 c1 c2 check
+permutation r1 w2 w3 p3 r2 p1 p2 c3 c2 c1 check
+permutation r1 w2 w3 p3 r2 p1 c3 p2 c1 c2 check
+permutation r1 w2 w3 p3 r2 p1 c3 p2 c2 c1 check
+permutation r1 w2 w3 p3 r2 p1 c3 c1 p2 c2 check
+permutation r1 w2 w3 p3 r2 p2 p1 c3 c1 c2 check
+permutation r1 w2 w3 p3 r2 p2 p1 c3 c2 c1 check
+permutation r1 w2 w3 p3 r2 p2 c3 p1 c1 c2 check
+permutation r1 w2 w3 p3 r2 p2 c3 p1 c2 c1 check
+permutation r1 w2 w3 p3 r2 p2 c3 c2 p1 c1 check
+permutation r1 w2 w3 p3 r2 c3 p1 p2 c1 c2 check
+permutation r1 w2 w3 p3 r2 c3 p1 p2 c2 c1 check
+permutation r1 w2 w3 p3 r2 c3 p1 c1 p2 c2 check
+permutation r1 w2 w3 p3 r2 c3 p2 p1 c1 c2 check
+permutation r1 w2 w3 p3 r2 c3 p2 p1 c2 c1 check
+permutation r1 w2 w3 p3 r2 c3 p2 c2 p1 c1 check
+permutation r1 w2 w3 p3 p1 r2 p2 c3 c1 c2 check
+permutation r1 w2 w3 p3 p1 r2 p2 c3 c2 c1 check
+permutation r1 w2 w3 p3 p1 r2 c3 p2 c1 c2 check
+permutation r1 w2 w3 p3 p1 r2 c3 p2 c2 c1 check
+permutation r1 w2 w3 p3 p1 r2 c3 c1 p2 c2 check
+permutation r1 w2 w3 p3 p1 c3 r2 p2 c1 c2 check
+permutation r1 w2 w3 p3 p1 c3 r2 p2 c2 c1 check
+permutation r1 w2 w3 p3 p1 c3 r2 c1 p2 c2 check
+permutation r1 w2 w3 p3 p1 c3 c1 r2 p2 c2 check
+permutation r1 w2 w3 p3 c3 r2 p1 p2 c1 c2 check
+permutation r1 w2 w3 p3 c3 r2 p1 p2 c2 c1 check
+permutation r1 w2 w3 p3 c3 r2 p1 c1 p2 c2 check
+permutation r1 w2 w3 p3 c3 r2 p2 p1 c1 c2 check
+permutation r1 w2 w3 p3 c3 r2 p2 p1 c2 c1 check
+permutation r1 w2 w3 p3 c3 r2 p2 c2 p1 c1 check
+permutation r1 w2 w3 p3 c3 p1 r2 p2 c1 c2 check
+permutation r1 w2 w3 p3 c3 p1 r2 p2 c2 c1 check
+permutation r1 w2 w3 p3 c3 p1 r2 c1 p2 c2 check
+permutation r1 w2 w3 p3 c3 p1 c1 r2 p2 c2 check
+permutation r1 w2 p1 w3 r2 p2 p3 c3 c1 c2 check
+permutation r1 w2 p1 w3 r2 p2 p3 c3 c2 c1 check
+permutation r1 w2 p1 w3 r2 p3 p2 c3 c1 c2 check
+permutation r1 w2 p1 w3 r2 p3 p2 c3 c2 c1 check
+permutation r1 w2 p1 w3 r2 p3 c3 p2 c1 c2 check
+permutation r1 w2 p1 w3 r2 p3 c3 p2 c2 c1 check
+permutation r1 w2 p1 w3 r2 p3 c3 c1 p2 c2 check
+permutation r1 w2 p1 w3 p3 r2 p2 c3 c1 c2 check
+permutation r1 w2 p1 w3 p3 r2 p2 c3 c2 c1 check
+permutation r1 w2 p1 w3 p3 r2 c3 p2 c1 c2 check
+permutation r1 w2 p1 w3 p3 r2 c3 p2 c2 c1 check
+permutation r1 w2 p1 w3 p3 r2 c3 c1 p2 c2 check
+permutation r1 w2 p1 w3 p3 c3 r2 p2 c1 c2 check
+permutation r1 w2 p1 w3 p3 c3 r2 p2 c2 c1 check
+permutation r1 w2 p1 w3 p3 c3 r2 c1 p2 c2 check
+permutation r1 w2 p1 w3 p3 c3 c1 r2 p2 c2 check
+permutation r1 w3 r2 w2 p1 p2 p3 c3 c1 c2 check
+permutation r1 w3 r2 w2 p1 p2 p3 c3 c2 c1 check
+permutation r1 w3 r2 w2 p1 p3 p2 c3 c1 c2 check
+permutation r1 w3 r2 w2 p1 p3 p2 c3 c2 c1 check
+permutation r1 w3 r2 w2 p1 p3 c3 p2 c1 c2 check
+permutation r1 w3 r2 w2 p1 p3 c3 p2 c2 c1 check
+permutation r1 w3 r2 w2 p1 p3 c3 c1 p2 c2 check
+permutation r1 w3 r2 w2 p2 p1 p3 c3 c1 c2 check
+permutation r1 w3 r2 w2 p2 p1 p3 c3 c2 c1 check
+permutation r1 w3 r2 w2 p2 p3 p1 c3 c1 c2 check
+permutation r1 w3 r2 w2 p2 p3 p1 c3 c2 c1 check
+permutation r1 w3 r2 w2 p2 p3 c3 p1 c1 c2 check
+permutation r1 w3 r2 w2 p2 p3 c3 p1 c2 c1 check
+permutation r1 w3 r2 w2 p2 p3 c3 c2 p1 c1 check
+permutation r1 w3 r2 w2 p3 p1 p2 c3 c1 c2 check
+permutation r1 w3 r2 w2 p3 p1 p2 c3 c2 c1 check
+permutation r1 w3 r2 w2 p3 p1 c3 p2 c1 c2 check
+permutation r1 w3 r2 w2 p3 p1 c3 p2 c2 c1 check
+permutation r1 w3 r2 w2 p3 p1 c3 c1 p2 c2 check
+permutation r1 w3 r2 w2 p3 p2 p1 c3 c1 c2 check
+permutation r1 w3 r2 w2 p3 p2 p1 c3 c2 c1 check
+permutation r1 w3 r2 w2 p3 p2 c3 p1 c1 c2 check
+permutation r1 w3 r2 w2 p3 p2 c3 p1 c2 c1 check
+permutation r1 w3 r2 w2 p3 p2 c3 c2 p1 c1 check
+permutation r1 w3 r2 w2 p3 c3 p1 p2 c1 c2 check
+permutation r1 w3 r2 w2 p3 c3 p1 p2 c2 c1 check
+permutation r1 w3 r2 w2 p3 c3 p1 c1 p2 c2 check
+permutation r1 w3 r2 w2 p3 c3 p2 p1 c1 c2 check
+permutation r1 w3 r2 w2 p3 c3 p2 p1 c2 c1 check
+permutation r1 w3 r2 w2 p3 c3 p2 c2 p1 c1 check
+permutation r1 w3 r2 p1 w2 p2 p3 c3 c1 c2 check
+permutation r1 w3 r2 p1 w2 p2 p3 c3 c2 c1 check
+permutation r1 w3 r2 p1 w2 p3 p2 c3 c1 c2 check
+permutation r1 w3 r2 p1 w2 p3 p2 c3 c2 c1 check
+permutation r1 w3 r2 p1 w2 p3 c3 p2 c1 c2 check
+permutation r1 w3 r2 p1 w2 p3 c3 p2 c2 c1 check
+permutation r1 w3 r2 p1 w2 p3 c3 c1 p2 c2 check
+permutation r1 w3 r2 p1 p3 w2 p2 c3 c1 c2 check
+permutation r1 w3 r2 p1 p3 w2 p2 c3 c2 c1 check
+permutation r1 w3 r2 p1 p3 w2 c3 p2 c1 c2 check
+permutation r1 w3 r2 p1 p3 w2 c3 p2 c2 c1 check
+permutation r1 w3 r2 p1 p3 w2 c3 c1 p2 c2 check
+permutation r1 w3 r2 p1 p3 c3 w2 p2 c1 c2 check
+permutation r1 w3 r2 p1 p3 c3 w2 p2 c2 c1 check
+permutation r1 w3 r2 p1 p3 c3 w2 c1 p2 c2 check
+permutation r1 w3 r2 p1 p3 c3 c1 w2 p2 c2 check
+permutation r1 w3 r2 p3 w2 p1 p2 c3 c1 c2 check
+permutation r1 w3 r2 p3 w2 p1 p2 c3 c2 c1 check
+permutation r1 w3 r2 p3 w2 p1 c3 p2 c1 c2 check
+permutation r1 w3 r2 p3 w2 p1 c3 p2 c2 c1 check
+permutation r1 w3 r2 p3 w2 p1 c3 c1 p2 c2 check
+permutation r1 w3 r2 p3 w2 p2 p1 c3 c1 c2 check
+permutation r1 w3 r2 p3 w2 p2 p1 c3 c2 c1 check
+permutation r1 w3 r2 p3 w2 p2 c3 p1 c1 c2 check
+permutation r1 w3 r2 p3 w2 p2 c3 p1 c2 c1 check
+permutation r1 w3 r2 p3 w2 p2 c3 c2 p1 c1 check
+permutation r1 w3 r2 p3 w2 c3 p1 p2 c1 c2 check
+permutation r1 w3 r2 p3 w2 c3 p1 p2 c2 c1 check
+permutation r1 w3 r2 p3 w2 c3 p1 c1 p2 c2 check
+permutation r1 w3 r2 p3 w2 c3 p2 p1 c1 c2 check
+permutation r1 w3 r2 p3 w2 c3 p2 p1 c2 c1 check
+permutation r1 w3 r2 p3 w2 c3 p2 c2 p1 c1 check
+permutation r1 w3 r2 p3 p1 w2 p2 c3 c1 c2 check
+permutation r1 w3 r2 p3 p1 w2 p2 c3 c2 c1 check
+permutation r1 w3 r2 p3 p1 w2 c3 p2 c1 c2 check
+permutation r1 w3 r2 p3 p1 w2 c3 p2 c2 c1 check
+permutation r1 w3 r2 p3 p1 w2 c3 c1 p2 c2 check
+permutation r1 w3 r2 p3 p1 c3 w2 p2 c1 c2 check
+permutation r1 w3 r2 p3 p1 c3 w2 p2 c2 c1 check
+permutation r1 w3 r2 p3 p1 c3 w2 c1 p2 c2 check
+permutation r1 w3 r2 p3 p1 c3 c1 w2 p2 c2 check
+permutation r1 w3 r2 p3 c3 w2 p1 p2 c1 c2 check
+permutation r1 w3 r2 p3 c3 w2 p1 p2 c2 c1 check
+permutation r1 w3 r2 p3 c3 w2 p1 c1 p2 c2 check
+permutation r1 w3 r2 p3 c3 w2 p2 p1 c1 c2 check
+permutation r1 w3 r2 p3 c3 w2 p2 p1 c2 c1 check
+permutation r1 w3 r2 p3 c3 w2 p2 c2 p1 c1 check
+permutation r1 w3 r2 p3 c3 p1 w2 p2 c1 c2 check
+permutation r1 w3 r2 p3 c3 p1 w2 p2 c2 c1 check
+permutation r1 w3 r2 p3 c3 p1 w2 c1 p2 c2 check
+permutation r1 w3 r2 p3 c3 p1 c1 w2 p2 c2 check
+permutation r1 w3 w2 r2 p1 p2 p3 c3 c1 c2 check
+permutation r1 w3 w2 r2 p1 p2 p3 c3 c2 c1 check
+permutation r1 w3 w2 r2 p1 p3 p2 c3 c1 c2 check
+permutation r1 w3 w2 r2 p1 p3 p2 c3 c2 c1 check
+permutation r1 w3 w2 r2 p1 p3 c3 p2 c1 c2 check
+permutation r1 w3 w2 r2 p1 p3 c3 p2 c2 c1 check
+permutation r1 w3 w2 r2 p1 p3 c3 c1 p2 c2 check
+permutation r1 w3 w2 r2 p2 p1 p3 c3 c1 c2 check
+permutation r1 w3 w2 r2 p2 p1 p3 c3 c2 c1 check
+permutation r1 w3 w2 r2 p2 p3 p1 c3 c1 c2 check
+permutation r1 w3 w2 r2 p2 p3 p1 c3 c2 c1 check
+permutation r1 w3 w2 r2 p2 p3 c3 p1 c1 c2 check
+permutation r1 w3 w2 r2 p2 p3 c3 p1 c2 c1 check
+permutation r1 w3 w2 r2 p2 p3 c3 c2 p1 c1 check
+permutation r1 w3 w2 r2 p3 p1 p2 c3 c1 c2 check
+permutation r1 w3 w2 r2 p3 p1 p2 c3 c2 c1 check
+permutation r1 w3 w2 r2 p3 p1 c3 p2 c1 c2 check
+permutation r1 w3 w2 r2 p3 p1 c3 p2 c2 c1 check
+permutation r1 w3 w2 r2 p3 p1 c3 c1 p2 c2 check
+permutation r1 w3 w2 r2 p3 p2 p1 c3 c1 c2 check
+permutation r1 w3 w2 r2 p3 p2 p1 c3 c2 c1 check
+permutation r1 w3 w2 r2 p3 p2 c3 p1 c1 c2 check
+permutation r1 w3 w2 r2 p3 p2 c3 p1 c2 c1 check
+permutation r1 w3 w2 r2 p3 p2 c3 c2 p1 c1 check
+permutation r1 w3 w2 r2 p3 c3 p1 p2 c1 c2 check
+permutation r1 w3 w2 r2 p3 c3 p1 p2 c2 c1 check
+permutation r1 w3 w2 r2 p3 c3 p1 c1 p2 c2 check
+permutation r1 w3 w2 r2 p3 c3 p2 p1 c1 c2 check
+permutation r1 w3 w2 r2 p3 c3 p2 p1 c2 c1 check
+permutation r1 w3 w2 r2 p3 c3 p2 c2 p1 c1 check
+permutation r1 w3 w2 p1 r2 p2 p3 c3 c1 c2 check
+permutation r1 w3 w2 p1 r2 p2 p3 c3 c2 c1 check
+permutation r1 w3 w2 p1 r2 p3 p2 c3 c1 c2 check
+permutation r1 w3 w2 p1 r2 p3 p2 c3 c2 c1 check
+permutation r1 w3 w2 p1 r2 p3 c3 p2 c1 c2 check
+permutation r1 w3 w2 p1 r2 p3 c3 p2 c2 c1 check
+permutation r1 w3 w2 p1 r2 p3 c3 c1 p2 c2 check
+permutation r1 w3 w2 p1 p3 r2 p2 c3 c1 c2 check
+permutation r1 w3 w2 p1 p3 r2 p2 c3 c2 c1 check
+permutation r1 w3 w2 p1 p3 r2 c3 p2 c1 c2 check
+permutation r1 w3 w2 p1 p3 r2 c3 p2 c2 c1 check
+permutation r1 w3 w2 p1 p3 r2 c3 c1 p2 c2 check
+permutation r1 w3 w2 p1 p3 c3 r2 p2 c1 c2 check
+permutation r1 w3 w2 p1 p3 c3 r2 p2 c2 c1 check
+permutation r1 w3 w2 p1 p3 c3 r2 c1 p2 c2 check
+permutation r1 w3 w2 p1 p3 c3 c1 r2 p2 c2 check
+permutation r1 w3 w2 p3 r2 p1 p2 c3 c1 c2 check
+permutation r1 w3 w2 p3 r2 p1 p2 c3 c2 c1 check
+permutation r1 w3 w2 p3 r2 p1 c3 p2 c1 c2 check
+permutation r1 w3 w2 p3 r2 p1 c3 p2 c2 c1 check
+permutation r1 w3 w2 p3 r2 p1 c3 c1 p2 c2 check
+permutation r1 w3 w2 p3 r2 p2 p1 c3 c1 c2 check
+permutation r1 w3 w2 p3 r2 p2 p1 c3 c2 c1 check
+permutation r1 w3 w2 p3 r2 p2 c3 p1 c1 c2 check
+permutation r1 w3 w2 p3 r2 p2 c3 p1 c2 c1 check
+permutation r1 w3 w2 p3 r2 p2 c3 c2 p1 c1 check
+permutation r1 w3 w2 p3 r2 c3 p1 p2 c1 c2 check
+permutation r1 w3 w2 p3 r2 c3 p1 p2 c2 c1 check
+permutation r1 w3 w2 p3 r2 c3 p1 c1 p2 c2 check
+permutation r1 w3 w2 p3 r2 c3 p2 p1 c1 c2 check
+permutation r1 w3 w2 p3 r2 c3 p2 p1 c2 c1 check
+permutation r1 w3 w2 p3 r2 c3 p2 c2 p1 c1 check
+permutation r1 w3 w2 p3 p1 r2 p2 c3 c1 c2 check
+permutation r1 w3 w2 p3 p1 r2 p2 c3 c2 c1 check
+permutation r1 w3 w2 p3 p1 r2 c3 p2 c1 c2 check
+permutation r1 w3 w2 p3 p1 r2 c3 p2 c2 c1 check
+permutation r1 w3 w2 p3 p1 r2 c3 c1 p2 c2 check
+permutation r1 w3 w2 p3 p1 c3 r2 p2 c1 c2 check
+permutation r1 w3 w2 p3 p1 c3 r2 p2 c2 c1 check
+permutation r1 w3 w2 p3 p1 c3 r2 c1 p2 c2 check
+permutation r1 w3 w2 p3 p1 c3 c1 r2 p2 c2 check
+permutation r1 w3 w2 p3 c3 r2 p1 p2 c1 c2 check
+permutation r1 w3 w2 p3 c3 r2 p1 p2 c2 c1 check
+permutation r1 w3 w2 p3 c3 r2 p1 c1 p2 c2 check
+permutation r1 w3 w2 p3 c3 r2 p2 p1 c1 c2 check
+permutation r1 w3 w2 p3 c3 r2 p2 p1 c2 c1 check
+permutation r1 w3 w2 p3 c3 r2 p2 c2 p1 c1 check
+permutation r1 w3 w2 p3 c3 p1 r2 p2 c1 c2 check
+permutation r1 w3 w2 p3 c3 p1 r2 p2 c2 c1 check
+permutation r1 w3 w2 p3 c3 p1 r2 c1 p2 c2 check
+permutation r1 w3 w2 p3 c3 p1 c1 r2 p2 c2 check
+permutation r1 w3 p1 r2 w2 p2 p3 c3 c1 c2 check
+permutation r1 w3 p1 r2 w2 p2 p3 c3 c2 c1 check
+permutation r1 w3 p1 r2 w2 p3 p2 c3 c1 c2 check
+permutation r1 w3 p1 r2 w2 p3 p2 c3 c2 c1 check
+permutation r1 w3 p1 r2 w2 p3 c3 p2 c1 c2 check
+permutation r1 w3 p1 r2 w2 p3 c3 p2 c2 c1 check
+permutation r1 w3 p1 r2 w2 p3 c3 c1 p2 c2 check
+permutation r1 w3 p1 r2 p3 w2 p2 c3 c1 c2 check
+permutation r1 w3 p1 r2 p3 w2 p2 c3 c2 c1 check
+permutation r1 w3 p1 r2 p3 w2 c3 p2 c1 c2 check
+permutation r1 w3 p1 r2 p3 w2 c3 p2 c2 c1 check
+permutation r1 w3 p1 r2 p3 w2 c3 c1 p2 c2 check
+permutation r1 w3 p1 r2 p3 c3 w2 p2 c1 c2 check
+permutation r1 w3 p1 r2 p3 c3 w2 p2 c2 c1 check
+permutation r1 w3 p1 r2 p3 c3 w2 c1 p2 c2 check
+permutation r1 w3 p1 r2 p3 c3 c1 w2 p2 c2 check
+permutation r1 w3 p1 w2 r2 p2 p3 c3 c1 c2 check
+permutation r1 w3 p1 w2 r2 p2 p3 c3 c2 c1 check
+permutation r1 w3 p1 w2 r2 p3 p2 c3 c1 c2 check
+permutation r1 w3 p1 w2 r2 p3 p2 c3 c2 c1 check
+permutation r1 w3 p1 w2 r2 p3 c3 p2 c1 c2 check
+permutation r1 w3 p1 w2 r2 p3 c3 p2 c2 c1 check
+permutation r1 w3 p1 w2 r2 p3 c3 c1 p2 c2 check
+permutation r1 w3 p1 w2 p3 r2 p2 c3 c1 c2 check
+permutation r1 w3 p1 w2 p3 r2 p2 c3 c2 c1 check
+permutation r1 w3 p1 w2 p3 r2 c3 p2 c1 c2 check
+permutation r1 w3 p1 w2 p3 r2 c3 p2 c2 c1 check
+permutation r1 w3 p1 w2 p3 r2 c3 c1 p2 c2 check
+permutation r1 w3 p1 w2 p3 c3 r2 p2 c1 c2 check
+permutation r1 w3 p1 w2 p3 c3 r2 p2 c2 c1 check
+permutation r1 w3 p1 w2 p3 c3 r2 c1 p2 c2 check
+permutation r1 w3 p1 w2 p3 c3 c1 r2 p2 c2 check
+permutation r1 w3 p1 p3 r2 w2 p2 c3 c1 c2 check
+permutation r1 w3 p1 p3 r2 w2 p2 c3 c2 c1 check
+permutation r1 w3 p1 p3 r2 w2 c3 p2 c1 c2 check
+permutation r1 w3 p1 p3 r2 w2 c3 p2 c2 c1 check
+permutation r1 w3 p1 p3 r2 w2 c3 c1 p2 c2 check
+permutation r1 w3 p1 p3 r2 c3 w2 p2 c1 c2 check
+permutation r1 w3 p1 p3 r2 c3 w2 p2 c2 c1 check
+permutation r1 w3 p1 p3 r2 c3 w2 c1 p2 c2 check
+permutation r1 w3 p1 p3 r2 c3 c1 w2 p2 c2 check
+permutation r1 w3 p1 p3 w2 r2 p2 c3 c1 c2 check
+permutation r1 w3 p1 p3 w2 r2 p2 c3 c2 c1 check
+permutation r1 w3 p1 p3 w2 r2 c3 p2 c1 c2 check
+permutation r1 w3 p1 p3 w2 r2 c3 p2 c2 c1 check
+permutation r1 w3 p1 p3 w2 r2 c3 c1 p2 c2 check
+permutation r1 w3 p1 p3 w2 c3 r2 p2 c1 c2 check
+permutation r1 w3 p1 p3 w2 c3 r2 p2 c2 c1 check
+permutation r1 w3 p1 p3 w2 c3 r2 c1 p2 c2 check
+permutation r1 w3 p1 p3 w2 c3 c1 r2 p2 c2 check
+permutation r1 w3 p1 p3 c3 r2 w2 p2 c1 c2 check
+permutation r1 w3 p1 p3 c3 r2 w2 p2 c2 c1 check
+permutation r1 w3 p1 p3 c3 r2 w2 c1 p2 c2 check
+permutation r1 w3 p1 p3 c3 r2 c1 w2 p2 c2 check
+permutation r1 w3 p1 p3 c3 w2 r2 p2 c1 c2 check
+permutation r1 w3 p1 p3 c3 w2 r2 p2 c2 c1 check
+permutation r1 w3 p1 p3 c3 w2 r2 c1 p2 c2 check
+permutation r1 w3 p1 p3 c3 w2 c1 r2 p2 c2 check
+permutation r1 w3 p1 p3 c3 c1 r2 w2 p2 c2 check
+permutation r1 w3 p1 p3 c3 c1 w2 r2 p2 c2 check
+permutation r1 w3 p3 r2 w2 p1 p2 c3 c1 c2 check
+permutation r1 w3 p3 r2 w2 p1 p2 c3 c2 c1 check
+permutation r1 w3 p3 r2 w2 p1 c3 p2 c1 c2 check
+permutation r1 w3 p3 r2 w2 p1 c3 p2 c2 c1 check
+permutation r1 w3 p3 r2 w2 p1 c3 c1 p2 c2 check
+permutation r1 w3 p3 r2 w2 p2 p1 c3 c1 c2 check
+permutation r1 w3 p3 r2 w2 p2 p1 c3 c2 c1 check
+permutation r1 w3 p3 r2 w2 p2 c3 p1 c1 c2 check
+permutation r1 w3 p3 r2 w2 p2 c3 p1 c2 c1 check
+permutation r1 w3 p3 r2 w2 p2 c3 c2 p1 c1 check
+permutation r1 w3 p3 r2 w2 c3 p1 p2 c1 c2 check
+permutation r1 w3 p3 r2 w2 c3 p1 p2 c2 c1 check
+permutation r1 w3 p3 r2 w2 c3 p1 c1 p2 c2 check
+permutation r1 w3 p3 r2 w2 c3 p2 p1 c1 c2 check
+permutation r1 w3 p3 r2 w2 c3 p2 p1 c2 c1 check
+permutation r1 w3 p3 r2 w2 c3 p2 c2 p1 c1 check
+permutation r1 w3 p3 r2 p1 w2 p2 c3 c1 c2 check
+permutation r1 w3 p3 r2 p1 w2 p2 c3 c2 c1 check
+permutation r1 w3 p3 r2 p1 w2 c3 p2 c1 c2 check
+permutation r1 w3 p3 r2 p1 w2 c3 p2 c2 c1 check
+permutation r1 w3 p3 r2 p1 w2 c3 c1 p2 c2 check
+permutation r1 w3 p3 r2 p1 c3 w2 p2 c1 c2 check
+permutation r1 w3 p3 r2 p1 c3 w2 p2 c2 c1 check
+permutation r1 w3 p3 r2 p1 c3 w2 c1 p2 c2 check
+permutation r1 w3 p3 r2 p1 c3 c1 w2 p2 c2 check
+permutation r1 w3 p3 r2 c3 w2 p1 p2 c1 c2 check
+permutation r1 w3 p3 r2 c3 w2 p1 p2 c2 c1 check
+permutation r1 w3 p3 r2 c3 w2 p1 c1 p2 c2 check
+permutation r1 w3 p3 r2 c3 w2 p2 p1 c1 c2 check
+permutation r1 w3 p3 r2 c3 w2 p2 p1 c2 c1 check
+permutation r1 w3 p3 r2 c3 w2 p2 c2 p1 c1 check
+permutation r1 w3 p3 r2 c3 p1 w2 p2 c1 c2 check
+permutation r1 w3 p3 r2 c3 p1 w2 p2 c2 c1 check
+permutation r1 w3 p3 r2 c3 p1 w2 c1 p2 c2 check
+permutation r1 w3 p3 r2 c3 p1 c1 w2 p2 c2 check
+permutation r1 w3 p3 w2 r2 p1 p2 c3 c1 c2 check
+permutation r1 w3 p3 w2 r2 p1 p2 c3 c2 c1 check
+permutation r1 w3 p3 w2 r2 p1 c3 p2 c1 c2 check
+permutation r1 w3 p3 w2 r2 p1 c3 p2 c2 c1 check
+permutation r1 w3 p3 w2 r2 p1 c3 c1 p2 c2 check
+permutation r1 w3 p3 w2 r2 p2 p1 c3 c1 c2 check
+permutation r1 w3 p3 w2 r2 p2 p1 c3 c2 c1 check
+permutation r1 w3 p3 w2 r2 p2 c3 p1 c1 c2 check
+permutation r1 w3 p3 w2 r2 p2 c3 p1 c2 c1 check
+permutation r1 w3 p3 w2 r2 p2 c3 c2 p1 c1 check
+permutation r1 w3 p3 w2 r2 c3 p1 p2 c1 c2 check
+permutation r1 w3 p3 w2 r2 c3 p1 p2 c2 c1 check
+permutation r1 w3 p3 w2 r2 c3 p1 c1 p2 c2 check
+permutation r1 w3 p3 w2 r2 c3 p2 p1 c1 c2 check
+permutation r1 w3 p3 w2 r2 c3 p2 p1 c2 c1 check
+permutation r1 w3 p3 w2 r2 c3 p2 c2 p1 c1 check
+permutation r1 w3 p3 w2 p1 r2 p2 c3 c1 c2 check
+permutation r1 w3 p3 w2 p1 r2 p2 c3 c2 c1 check
+permutation r1 w3 p3 w2 p1 r2 c3 p2 c1 c2 check
+permutation r1 w3 p3 w2 p1 r2 c3 p2 c2 c1 check
+permutation r1 w3 p3 w2 p1 r2 c3 c1 p2 c2 check
+permutation r1 w3 p3 w2 p1 c3 r2 p2 c1 c2 check
+permutation r1 w3 p3 w2 p1 c3 r2 p2 c2 c1 check
+permutation r1 w3 p3 w2 p1 c3 r2 c1 p2 c2 check
+permutation r1 w3 p3 w2 p1 c3 c1 r2 p2 c2 check
+permutation r1 w3 p3 w2 c3 r2 p1 p2 c1 c2 check
+permutation r1 w3 p3 w2 c3 r2 p1 p2 c2 c1 check
+permutation r1 w3 p3 w2 c3 r2 p1 c1 p2 c2 check
+permutation r1 w3 p3 w2 c3 r2 p2 p1 c1 c2 check
+permutation r1 w3 p3 w2 c3 r2 p2 p1 c2 c1 check
+permutation r1 w3 p3 w2 c3 r2 p2 c2 p1 c1 check
+permutation r1 w3 p3 w2 c3 p1 r2 p2 c1 c2 check
+permutation r1 w3 p3 w2 c3 p1 r2 p2 c2 c1 check
+permutation r1 w3 p3 w2 c3 p1 r2 c1 p2 c2 check
+permutation r1 w3 p3 w2 c3 p1 c1 r2 p2 c2 check
+permutation r1 w3 p3 p1 r2 w2 p2 c3 c1 c2 check
+permutation r1 w3 p3 p1 r2 w2 p2 c3 c2 c1 check
+permutation r1 w3 p3 p1 r2 w2 c3 p2 c1 c2 check
+permutation r1 w3 p3 p1 r2 w2 c3 p2 c2 c1 check
+permutation r1 w3 p3 p1 r2 w2 c3 c1 p2 c2 check
+permutation r1 w3 p3 p1 r2 c3 w2 p2 c1 c2 check
+permutation r1 w3 p3 p1 r2 c3 w2 p2 c2 c1 check
+permutation r1 w3 p3 p1 r2 c3 w2 c1 p2 c2 check
+permutation r1 w3 p3 p1 r2 c3 c1 w2 p2 c2 check
+permutation r1 w3 p3 p1 w2 r2 p2 c3 c1 c2 check
+permutation r1 w3 p3 p1 w2 r2 p2 c3 c2 c1 check
+permutation r1 w3 p3 p1 w2 r2 c3 p2 c1 c2 check
+permutation r1 w3 p3 p1 w2 r2 c3 p2 c2 c1 check
+permutation r1 w3 p3 p1 w2 r2 c3 c1 p2 c2 check
+permutation r1 w3 p3 p1 w2 c3 r2 p2 c1 c2 check
+permutation r1 w3 p3 p1 w2 c3 r2 p2 c2 c1 check
+permutation r1 w3 p3 p1 w2 c3 r2 c1 p2 c2 check
+permutation r1 w3 p3 p1 w2 c3 c1 r2 p2 c2 check
+permutation r1 w3 p3 p1 c3 r2 w2 p2 c1 c2 check
+permutation r1 w3 p3 p1 c3 r2 w2 p2 c2 c1 check
+permutation r1 w3 p3 p1 c3 r2 w2 c1 p2 c2 check
+permutation r1 w3 p3 p1 c3 r2 c1 w2 p2 c2 check
+permutation r1 w3 p3 p1 c3 w2 r2 p2 c1 c2 check
+permutation r1 w3 p3 p1 c3 w2 r2 p2 c2 c1 check
+permutation r1 w3 p3 p1 c3 w2 r2 c1 p2 c2 check
+permutation r1 w3 p3 p1 c3 w2 c1 r2 p2 c2 check
+permutation r1 w3 p3 p1 c3 c1 r2 w2 p2 c2 check
+permutation r1 w3 p3 p1 c3 c1 w2 r2 p2 c2 check
+permutation r1 w3 p3 c3 r2 w2 p1 p2 c1 c2 check
+permutation r1 w3 p3 c3 r2 w2 p1 p2 c2 c1 check
+permutation r1 w3 p3 c3 r2 w2 p1 c1 p2 c2 check
+permutation r1 w3 p3 c3 r2 w2 p2 p1 c1 c2 check
+permutation r1 w3 p3 c3 r2 w2 p2 p1 c2 c1 check
+permutation r1 w3 p3 c3 r2 w2 p2 c2 p1 c1 check
+permutation r1 w3 p3 c3 r2 p1 w2 p2 c1 c2 check
+permutation r1 w3 p3 c3 r2 p1 w2 p2 c2 c1 check
+permutation r1 w3 p3 c3 r2 p1 w2 c1 p2 c2 check
+permutation r1 w3 p3 c3 r2 p1 c1 w2 p2 c2 check
+permutation r1 w3 p3 c3 w2 r2 p1 p2 c1 c2 check
+permutation r1 w3 p3 c3 w2 r2 p1 p2 c2 c1 check
+permutation r1 w3 p3 c3 w2 r2 p1 c1 p2 c2 check
+permutation r1 w3 p3 c3 w2 r2 p2 p1 c1 c2 check
+permutation r1 w3 p3 c3 w2 r2 p2 p1 c2 c1 check
+permutation r1 w3 p3 c3 w2 r2 p2 c2 p1 c1 check
+permutation r1 w3 p3 c3 w2 p1 r2 p2 c1 c2 check
+permutation r1 w3 p3 c3 w2 p1 r2 p2 c2 c1 check
+permutation r1 w3 p3 c3 w2 p1 r2 c1 p2 c2 check
+permutation r1 w3 p3 c3 w2 p1 c1 r2 p2 c2 check
+permutation r1 w3 p3 c3 p1 r2 w2 p2 c1 c2 check
+permutation r1 w3 p3 c3 p1 r2 w2 p2 c2 c1 check
+permutation r1 w3 p3 c3 p1 r2 w2 c1 p2 c2 check
+permutation r1 w3 p3 c3 p1 r2 c1 w2 p2 c2 check
+permutation r1 w3 p3 c3 p1 w2 r2 p2 c1 c2 check
+permutation r1 w3 p3 c3 p1 w2 r2 p2 c2 c1 check
+permutation r1 w3 p3 c3 p1 w2 r2 c1 p2 c2 check
+permutation r1 w3 p3 c3 p1 w2 c1 r2 p2 c2 check
+permutation r1 w3 p3 c3 p1 c1 r2 w2 p2 c2 check
+permutation r1 w3 p3 c3 p1 c1 w2 r2 p2 c2 check
+permutation r1 p1 r2 w2 w3 p2 p3 c3 c1 c2 check
+permutation r1 p1 r2 w2 w3 p2 p3 c3 c2 c1 check
+permutation r1 p1 r2 w2 w3 p3 p2 c3 c1 c2 check
+permutation r1 p1 r2 w2 w3 p3 p2 c3 c2 c1 check
+permutation r1 p1 r2 w2 w3 p3 c3 p2 c1 c2 check
+permutation r1 p1 r2 w2 w3 p3 c3 p2 c2 c1 check
+permutation r1 p1 r2 w2 w3 p3 c3 c1 p2 c2 check
+permutation r1 p1 r2 w2 p2 w3 p3 c3 c1 c2 check
+permutation r1 p1 r2 w2 p2 w3 p3 c3 c2 c1 check
+permutation r1 p1 w2 w3 r2 p2 p3 c3 c1 c2 check
+permutation r1 p1 w2 w3 r2 p2 p3 c3 c2 c1 check
+permutation r1 p1 w2 w3 r2 p3 p2 c3 c1 c2 check
+permutation r1 p1 w2 w3 r2 p3 p2 c3 c2 c1 check
+permutation r1 p1 w2 w3 r2 p3 c3 p2 c1 c2 check
+permutation r1 p1 w2 w3 r2 p3 c3 p2 c2 c1 check
+permutation r1 p1 w2 w3 r2 p3 c3 c1 p2 c2 check
+permutation r1 p1 w2 w3 p3 r2 p2 c3 c1 c2 check
+permutation r1 p1 w2 w3 p3 r2 p2 c3 c2 c1 check
+permutation r1 p1 w2 w3 p3 r2 c3 p2 c1 c2 check
+permutation r1 p1 w2 w3 p3 r2 c3 p2 c2 c1 check
+permutation r1 p1 w2 w3 p3 r2 c3 c1 p2 c2 check
+permutation r1 p1 w2 w3 p3 c3 r2 p2 c1 c2 check
+permutation r1 p1 w2 w3 p3 c3 r2 p2 c2 c1 check
+permutation r1 p1 w2 w3 p3 c3 r2 c1 p2 c2 check
+permutation r1 p1 w2 w3 p3 c3 c1 r2 p2 c2 check
+permutation r1 p1 w3 r2 w2 p2 p3 c3 c1 c2 check
+permutation r1 p1 w3 r2 w2 p2 p3 c3 c2 c1 check
+permutation r1 p1 w3 r2 w2 p3 p2 c3 c1 c2 check
+permutation r1 p1 w3 r2 w2 p3 p2 c3 c2 c1 check
+permutation r1 p1 w3 r2 w2 p3 c3 p2 c1 c2 check
+permutation r1 p1 w3 r2 w2 p3 c3 p2 c2 c1 check
+permutation r1 p1 w3 r2 w2 p3 c3 c1 p2 c2 check
+permutation r1 p1 w3 r2 p3 w2 p2 c3 c1 c2 check
+permutation r1 p1 w3 r2 p3 w2 p2 c3 c2 c1 check
+permutation r1 p1 w3 r2 p3 w2 c3 p2 c1 c2 check
+permutation r1 p1 w3 r2 p3 w2 c3 p2 c2 c1 check
+permutation r1 p1 w3 r2 p3 w2 c3 c1 p2 c2 check
+permutation r1 p1 w3 r2 p3 c3 w2 p2 c1 c2 check
+permutation r1 p1 w3 r2 p3 c3 w2 p2 c2 c1 check
+permutation r1 p1 w3 r2 p3 c3 w2 c1 p2 c2 check
+permutation r1 p1 w3 r2 p3 c3 c1 w2 p2 c2 check
+permutation r1 p1 w3 w2 r2 p2 p3 c3 c1 c2 check
+permutation r1 p1 w3 w2 r2 p2 p3 c3 c2 c1 check
+permutation r1 p1 w3 w2 r2 p3 p2 c3 c1 c2 check
+permutation r1 p1 w3 w2 r2 p3 p2 c3 c2 c1 check
+permutation r1 p1 w3 w2 r2 p3 c3 p2 c1 c2 check
+permutation r1 p1 w3 w2 r2 p3 c3 p2 c2 c1 check
+permutation r1 p1 w3 w2 r2 p3 c3 c1 p2 c2 check
+permutation r1 p1 w3 w2 p3 r2 p2 c3 c1 c2 check
+permutation r1 p1 w3 w2 p3 r2 p2 c3 c2 c1 check
+permutation r1 p1 w3 w2 p3 r2 c3 p2 c1 c2 check
+permutation r1 p1 w3 w2 p3 r2 c3 p2 c2 c1 check
+permutation r1 p1 w3 w2 p3 r2 c3 c1 p2 c2 check
+permutation r1 p1 w3 w2 p3 c3 r2 p2 c1 c2 check
+permutation r1 p1 w3 w2 p3 c3 r2 p2 c2 c1 check
+permutation r1 p1 w3 w2 p3 c3 r2 c1 p2 c2 check
+permutation r1 p1 w3 w2 p3 c3 c1 r2 p2 c2 check
+permutation r1 p1 w3 p3 r2 w2 p2 c3 c1 c2 check
+permutation r1 p1 w3 p3 r2 w2 p2 c3 c2 c1 check
+permutation r1 p1 w3 p3 r2 w2 c3 p2 c1 c2 check
+permutation r1 p1 w3 p3 r2 w2 c3 p2 c2 c1 check
+permutation r1 p1 w3 p3 r2 w2 c3 c1 p2 c2 check
+permutation r1 p1 w3 p3 r2 c3 w2 p2 c1 c2 check
+permutation r1 p1 w3 p3 r2 c3 w2 p2 c2 c1 check
+permutation r1 p1 w3 p3 r2 c3 w2 c1 p2 c2 check
+permutation r1 p1 w3 p3 r2 c3 c1 w2 p2 c2 check
+permutation r1 p1 w3 p3 w2 r2 p2 c3 c1 c2 check
+permutation r1 p1 w3 p3 w2 r2 p2 c3 c2 c1 check
+permutation r1 p1 w3 p3 w2 r2 c3 p2 c1 c2 check
+permutation r1 p1 w3 p3 w2 r2 c3 p2 c2 c1 check
+permutation r1 p1 w3 p3 w2 r2 c3 c1 p2 c2 check
+permutation r1 p1 w3 p3 w2 c3 r2 p2 c1 c2 check
+permutation r1 p1 w3 p3 w2 c3 r2 p2 c2 c1 check
+permutation r1 p1 w3 p3 w2 c3 r2 c1 p2 c2 check
+permutation r1 p1 w3 p3 w2 c3 c1 r2 p2 c2 check
+permutation r1 p1 w3 p3 c3 r2 w2 p2 c1 c2 check
+permutation r1 p1 w3 p3 c3 r2 w2 p2 c2 c1 check
+permutation r1 p1 w3 p3 c3 r2 w2 c1 p2 c2 check
+permutation r1 p1 w3 p3 c3 r2 c1 w2 p2 c2 check
+permutation r1 p1 w3 p3 c3 w2 r2 p2 c1 c2 check
+permutation r1 p1 w3 p3 c3 w2 r2 p2 c2 c1 check
+permutation r1 p1 w3 p3 c3 w2 r2 c1 p2 c2 check
+permutation r1 p1 w3 p3 c3 w2 c1 r2 p2 c2 check
+permutation r1 p1 w3 p3 c3 c1 r2 w2 p2 c2 check
+permutation r1 p1 w3 p3 c3 c1 w2 r2 p2 c2 check
+permutation w2 r1 r2 w3 p1 p2 p3 c3 c1 c2 check
+permutation w2 r1 r2 w3 p1 p2 p3 c3 c2 c1 check
+permutation w2 r1 r2 w3 p1 p3 p2 c3 c1 c2 check
+permutation w2 r1 r2 w3 p1 p3 p2 c3 c2 c1 check
+permutation w2 r1 r2 w3 p1 p3 c3 p2 c1 c2 check
+permutation w2 r1 r2 w3 p1 p3 c3 p2 c2 c1 check
+permutation w2 r1 r2 w3 p1 p3 c3 c1 p2 c2 check
+permutation w2 r1 r2 w3 p2 p1 p3 c3 c1 c2 check
+permutation w2 r1 r2 w3 p2 p1 p3 c3 c2 c1 check
+permutation w2 r1 r2 w3 p2 p3 p1 c3 c1 c2 check
+permutation w2 r1 r2 w3 p2 p3 p1 c3 c2 c1 check
+permutation w2 r1 r2 w3 p2 p3 c3 p1 c1 c2 check
+permutation w2 r1 r2 w3 p2 p3 c3 p1 c2 c1 check
+permutation w2 r1 r2 w3 p2 p3 c3 c2 p1 c1 check
+permutation w2 r1 r2 w3 p3 p1 p2 c3 c1 c2 check
+permutation w2 r1 r2 w3 p3 p1 p2 c3 c2 c1 check
+permutation w2 r1 r2 w3 p3 p1 c3 p2 c1 c2 check
+permutation w2 r1 r2 w3 p3 p1 c3 p2 c2 c1 check
+permutation w2 r1 r2 w3 p3 p1 c3 c1 p2 c2 check
+permutation w2 r1 r2 w3 p3 p2 p1 c3 c1 c2 check
+permutation w2 r1 r2 w3 p3 p2 p1 c3 c2 c1 check
+permutation w2 r1 r2 w3 p3 p2 c3 p1 c1 c2 check
+permutation w2 r1 r2 w3 p3 p2 c3 p1 c2 c1 check
+permutation w2 r1 r2 w3 p3 p2 c3 c2 p1 c1 check
+permutation w2 r1 r2 w3 p3 c3 p1 p2 c1 c2 check
+permutation w2 r1 r2 w3 p3 c3 p1 p2 c2 c1 check
+permutation w2 r1 r2 w3 p3 c3 p1 c1 p2 c2 check
+permutation w2 r1 r2 w3 p3 c3 p2 p1 c1 c2 check
+permutation w2 r1 r2 w3 p3 c3 p2 p1 c2 c1 check
+permutation w2 r1 r2 w3 p3 c3 p2 c2 p1 c1 check
+permutation w2 r1 r2 p1 w3 p2 p3 c3 c1 c2 check
+permutation w2 r1 r2 p1 w3 p2 p3 c3 c2 c1 check
+permutation w2 r1 r2 p1 w3 p3 p2 c3 c1 c2 check
+permutation w2 r1 r2 p1 w3 p3 p2 c3 c2 c1 check
+permutation w2 r1 r2 p1 w3 p3 c3 p2 c1 c2 check
+permutation w2 r1 r2 p1 w3 p3 c3 p2 c2 c1 check
+permutation w2 r1 r2 p1 w3 p3 c3 c1 p2 c2 check
+permutation w2 r1 r2 p1 p2 w3 p3 c3 c1 c2 check
+permutation w2 r1 r2 p1 p2 w3 p3 c3 c2 c1 check
+permutation w2 r1 r2 p2 w3 p1 p3 c3 c1 c2 check
+permutation w2 r1 r2 p2 w3 p1 p3 c3 c2 c1 check
+permutation w2 r1 r2 p2 w3 p3 p1 c3 c1 c2 check
+permutation w2 r1 r2 p2 w3 p3 p1 c3 c2 c1 check
+permutation w2 r1 r2 p2 w3 p3 c3 p1 c1 c2 check
+permutation w2 r1 r2 p2 w3 p3 c3 p1 c2 c1 check
+permutation w2 r1 r2 p2 w3 p3 c3 c2 p1 c1 check
+permutation w2 r1 r2 p2 p1 w3 p3 c3 c1 c2 check
+permutation w2 r1 r2 p2 p1 w3 p3 c3 c2 c1 check
+permutation w2 r1 w3 r2 p1 p2 p3 c3 c1 c2 check
+permutation w2 r1 w3 r2 p1 p2 p3 c3 c2 c1 check
+permutation w2 r1 w3 r2 p1 p3 p2 c3 c1 c2 check
+permutation w2 r1 w3 r2 p1 p3 p2 c3 c2 c1 check
+permutation w2 r1 w3 r2 p1 p3 c3 p2 c1 c2 check
+permutation w2 r1 w3 r2 p1 p3 c3 p2 c2 c1 check
+permutation w2 r1 w3 r2 p1 p3 c3 c1 p2 c2 check
+permutation w2 r1 w3 r2 p2 p1 p3 c3 c1 c2 check
+permutation w2 r1 w3 r2 p2 p1 p3 c3 c2 c1 check
+permutation w2 r1 w3 r2 p2 p3 p1 c3 c1 c2 check
+permutation w2 r1 w3 r2 p2 p3 p1 c3 c2 c1 check
+permutation w2 r1 w3 r2 p2 p3 c3 p1 c1 c2 check
+permutation w2 r1 w3 r2 p2 p3 c3 p1 c2 c1 check
+permutation w2 r1 w3 r2 p2 p3 c3 c2 p1 c1 check
+permutation w2 r1 w3 r2 p3 p1 p2 c3 c1 c2 check
+permutation w2 r1 w3 r2 p3 p1 p2 c3 c2 c1 check
+permutation w2 r1 w3 r2 p3 p1 c3 p2 c1 c2 check
+permutation w2 r1 w3 r2 p3 p1 c3 p2 c2 c1 check
+permutation w2 r1 w3 r2 p3 p1 c3 c1 p2 c2 check
+permutation w2 r1 w3 r2 p3 p2 p1 c3 c1 c2 check
+permutation w2 r1 w3 r2 p3 p2 p1 c3 c2 c1 check
+permutation w2 r1 w3 r2 p3 p2 c3 p1 c1 c2 check
+permutation w2 r1 w3 r2 p3 p2 c3 p1 c2 c1 check
+permutation w2 r1 w3 r2 p3 p2 c3 c2 p1 c1 check
+permutation w2 r1 w3 r2 p3 c3 p1 p2 c1 c2 check
+permutation w2 r1 w3 r2 p3 c3 p1 p2 c2 c1 check
+permutation w2 r1 w3 r2 p3 c3 p1 c1 p2 c2 check
+permutation w2 r1 w3 r2 p3 c3 p2 p1 c1 c2 check
+permutation w2 r1 w3 r2 p3 c3 p2 p1 c2 c1 check
+permutation w2 r1 w3 r2 p3 c3 p2 c2 p1 c1 check
+permutation w2 r1 w3 p1 r2 p2 p3 c3 c1 c2 check
+permutation w2 r1 w3 p1 r2 p2 p3 c3 c2 c1 check
+permutation w2 r1 w3 p1 r2 p3 p2 c3 c1 c2 check
+permutation w2 r1 w3 p1 r2 p3 p2 c3 c2 c1 check
+permutation w2 r1 w3 p1 r2 p3 c3 p2 c1 c2 check
+permutation w2 r1 w3 p1 r2 p3 c3 p2 c2 c1 check
+permutation w2 r1 w3 p1 r2 p3 c3 c1 p2 c2 check
+permutation w2 r1 w3 p1 p3 r2 p2 c3 c1 c2 check
+permutation w2 r1 w3 p1 p3 r2 p2 c3 c2 c1 check
+permutation w2 r1 w3 p1 p3 r2 c3 p2 c1 c2 check
+permutation w2 r1 w3 p1 p3 r2 c3 p2 c2 c1 check
+permutation w2 r1 w3 p1 p3 r2 c3 c1 p2 c2 check
+permutation w2 r1 w3 p1 p3 c3 r2 p2 c1 c2 check
+permutation w2 r1 w3 p1 p3 c3 r2 p2 c2 c1 check
+permutation w2 r1 w3 p1 p3 c3 r2 c1 p2 c2 check
+permutation w2 r1 w3 p1 p3 c3 c1 r2 p2 c2 check
+permutation w2 r1 w3 p3 r2 p1 p2 c3 c1 c2 check
+permutation w2 r1 w3 p3 r2 p1 p2 c3 c2 c1 check
+permutation w2 r1 w3 p3 r2 p1 c3 p2 c1 c2 check
+permutation w2 r1 w3 p3 r2 p1 c3 p2 c2 c1 check
+permutation w2 r1 w3 p3 r2 p1 c3 c1 p2 c2 check
+permutation w2 r1 w3 p3 r2 p2 p1 c3 c1 c2 check
+permutation w2 r1 w3 p3 r2 p2 p1 c3 c2 c1 check
+permutation w2 r1 w3 p3 r2 p2 c3 p1 c1 c2 check
+permutation w2 r1 w3 p3 r2 p2 c3 p1 c2 c1 check
+permutation w2 r1 w3 p3 r2 p2 c3 c2 p1 c1 check
+permutation w2 r1 w3 p3 r2 c3 p1 p2 c1 c2 check
+permutation w2 r1 w3 p3 r2 c3 p1 p2 c2 c1 check
+permutation w2 r1 w3 p3 r2 c3 p1 c1 p2 c2 check
+permutation w2 r1 w3 p3 r2 c3 p2 p1 c1 c2 check
+permutation w2 r1 w3 p3 r2 c3 p2 p1 c2 c1 check
+permutation w2 r1 w3 p3 r2 c3 p2 c2 p1 c1 check
+permutation w2 r1 w3 p3 p1 r2 p2 c3 c1 c2 check
+permutation w2 r1 w3 p3 p1 r2 p2 c3 c2 c1 check
+permutation w2 r1 w3 p3 p1 r2 c3 p2 c1 c2 check
+permutation w2 r1 w3 p3 p1 r2 c3 p2 c2 c1 check
+permutation w2 r1 w3 p3 p1 r2 c3 c1 p2 c2 check
+permutation w2 r1 w3 p3 p1 c3 r2 p2 c1 c2 check
+permutation w2 r1 w3 p3 p1 c3 r2 p2 c2 c1 check
+permutation w2 r1 w3 p3 p1 c3 r2 c1 p2 c2 check
+permutation w2 r1 w3 p3 p1 c3 c1 r2 p2 c2 check
+permutation w2 r1 w3 p3 c3 r2 p1 p2 c1 c2 check
+permutation w2 r1 w3 p3 c3 r2 p1 p2 c2 c1 check
+permutation w2 r1 w3 p3 c3 r2 p1 c1 p2 c2 check
+permutation w2 r1 w3 p3 c3 r2 p2 p1 c1 c2 check
+permutation w2 r1 w3 p3 c3 r2 p2 p1 c2 c1 check
+permutation w2 r1 w3 p3 c3 r2 p2 c2 p1 c1 check
+permutation w2 r1 w3 p3 c3 p1 r2 p2 c1 c2 check
+permutation w2 r1 w3 p3 c3 p1 r2 p2 c2 c1 check
+permutation w2 r1 w3 p3 c3 p1 r2 c1 p2 c2 check
+permutation w2 r1 w3 p3 c3 p1 c1 r2 p2 c2 check
+permutation w2 r1 p1 r2 w3 p2 p3 c3 c1 c2 check
+permutation w2 r1 p1 r2 w3 p2 p3 c3 c2 c1 check
+permutation w2 r1 p1 r2 w3 p3 p2 c3 c1 c2 check
+permutation w2 r1 p1 r2 w3 p3 p2 c3 c2 c1 check
+permutation w2 r1 p1 r2 w3 p3 c3 p2 c1 c2 check
+permutation w2 r1 p1 r2 w3 p3 c3 p2 c2 c1 check
+permutation w2 r1 p1 r2 w3 p3 c3 c1 p2 c2 check
+permutation w2 r1 p1 r2 p2 w3 p3 c3 c1 c2 check
+permutation w2 r1 p1 r2 p2 w3 p3 c3 c2 c1 check
+permutation w2 r1 p1 w3 r2 p2 p3 c3 c1 c2 check
+permutation w2 r1 p1 w3 r2 p2 p3 c3 c2 c1 check
+permutation w2 r1 p1 w3 r2 p3 p2 c3 c1 c2 check
+permutation w2 r1 p1 w3 r2 p3 p2 c3 c2 c1 check
+permutation w2 r1 p1 w3 r2 p3 c3 p2 c1 c2 check
+permutation w2 r1 p1 w3 r2 p3 c3 p2 c2 c1 check
+permutation w2 r1 p1 w3 r2 p3 c3 c1 p2 c2 check
+permutation w2 r1 p1 w3 p3 r2 p2 c3 c1 c2 check
+permutation w2 r1 p1 w3 p3 r2 p2 c3 c2 c1 check
+permutation w2 r1 p1 w3 p3 r2 c3 p2 c1 c2 check
+permutation w2 r1 p1 w3 p3 r2 c3 p2 c2 c1 check
+permutation w2 r1 p1 w3 p3 r2 c3 c1 p2 c2 check
+permutation w2 r1 p1 w3 p3 c3 r2 p2 c1 c2 check
+permutation w2 r1 p1 w3 p3 c3 r2 p2 c2 c1 check
+permutation w2 r1 p1 w3 p3 c3 r2 c1 p2 c2 check
+permutation w2 r1 p1 w3 p3 c3 c1 r2 p2 c2 check
+permutation w3 r1 r2 w2 p1 p2 p3 c3 c1 c2 check
+permutation w3 r1 r2 w2 p1 p2 p3 c3 c2 c1 check
+permutation w3 r1 r2 w2 p1 p3 p2 c3 c1 c2 check
+permutation w3 r1 r2 w2 p1 p3 p2 c3 c2 c1 check
+permutation w3 r1 r2 w2 p1 p3 c3 p2 c1 c2 check
+permutation w3 r1 r2 w2 p1 p3 c3 p2 c2 c1 check
+permutation w3 r1 r2 w2 p1 p3 c3 c1 p2 c2 check
+permutation w3 r1 r2 w2 p2 p1 p3 c3 c1 c2 check
+permutation w3 r1 r2 w2 p2 p1 p3 c3 c2 c1 check
+permutation w3 r1 r2 w2 p2 p3 p1 c3 c1 c2 check
+permutation w3 r1 r2 w2 p2 p3 p1 c3 c2 c1 check
+permutation w3 r1 r2 w2 p2 p3 c3 p1 c1 c2 check
+permutation w3 r1 r2 w2 p2 p3 c3 p1 c2 c1 check
+permutation w3 r1 r2 w2 p2 p3 c3 c2 p1 c1 check
+permutation w3 r1 r2 w2 p3 p1 p2 c3 c1 c2 check
+permutation w3 r1 r2 w2 p3 p1 p2 c3 c2 c1 check
+permutation w3 r1 r2 w2 p3 p1 c3 p2 c1 c2 check
+permutation w3 r1 r2 w2 p3 p1 c3 p2 c2 c1 check
+permutation w3 r1 r2 w2 p3 p1 c3 c1 p2 c2 check
+permutation w3 r1 r2 w2 p3 p2 p1 c3 c1 c2 check
+permutation w3 r1 r2 w2 p3 p2 p1 c3 c2 c1 check
+permutation w3 r1 r2 w2 p3 p2 c3 p1 c1 c2 check
+permutation w3 r1 r2 w2 p3 p2 c3 p1 c2 c1 check
+permutation w3 r1 r2 w2 p3 p2 c3 c2 p1 c1 check
+permutation w3 r1 r2 w2 p3 c3 p1 p2 c1 c2 check
+permutation w3 r1 r2 w2 p3 c3 p1 p2 c2 c1 check
+permutation w3 r1 r2 w2 p3 c3 p1 c1 p2 c2 check
+permutation w3 r1 r2 w2 p3 c3 p2 p1 c1 c2 check
+permutation w3 r1 r2 w2 p3 c3 p2 p1 c2 c1 check
+permutation w3 r1 r2 w2 p3 c3 p2 c2 p1 c1 check
+permutation w3 r1 r2 p1 w2 p2 p3 c3 c1 c2 check
+permutation w3 r1 r2 p1 w2 p2 p3 c3 c2 c1 check
+permutation w3 r1 r2 p1 w2 p3 p2 c3 c1 c2 check
+permutation w3 r1 r2 p1 w2 p3 p2 c3 c2 c1 check
+permutation w3 r1 r2 p1 w2 p3 c3 p2 c1 c2 check
+permutation w3 r1 r2 p1 w2 p3 c3 p2 c2 c1 check
+permutation w3 r1 r2 p1 w2 p3 c3 c1 p2 c2 check
+permutation w3 r1 r2 p1 p3 w2 p2 c3 c1 c2 check
+permutation w3 r1 r2 p1 p3 w2 p2 c3 c2 c1 check
+permutation w3 r1 r2 p1 p3 w2 c3 p2 c1 c2 check
+permutation w3 r1 r2 p1 p3 w2 c3 p2 c2 c1 check
+permutation w3 r1 r2 p1 p3 w2 c3 c1 p2 c2 check
+permutation w3 r1 r2 p1 p3 c3 w2 p2 c1 c2 check
+permutation w3 r1 r2 p1 p3 c3 w2 p2 c2 c1 check
+permutation w3 r1 r2 p1 p3 c3 w2 c1 p2 c2 check
+permutation w3 r1 r2 p1 p3 c3 c1 w2 p2 c2 check
+permutation w3 r1 r2 p3 w2 p1 p2 c3 c1 c2 check
+permutation w3 r1 r2 p3 w2 p1 p2 c3 c2 c1 check
+permutation w3 r1 r2 p3 w2 p1 c3 p2 c1 c2 check
+permutation w3 r1 r2 p3 w2 p1 c3 p2 c2 c1 check
+permutation w3 r1 r2 p3 w2 p1 c3 c1 p2 c2 check
+permutation w3 r1 r2 p3 w2 p2 p1 c3 c1 c2 check
+permutation w3 r1 r2 p3 w2 p2 p1 c3 c2 c1 check
+permutation w3 r1 r2 p3 w2 p2 c3 p1 c1 c2 check
+permutation w3 r1 r2 p3 w2 p2 c3 p1 c2 c1 check
+permutation w3 r1 r2 p3 w2 p2 c3 c2 p1 c1 check
+permutation w3 r1 r2 p3 w2 c3 p1 p2 c1 c2 check
+permutation w3 r1 r2 p3 w2 c3 p1 p2 c2 c1 check
+permutation w3 r1 r2 p3 w2 c3 p1 c1 p2 c2 check
+permutation w3 r1 r2 p3 w2 c3 p2 p1 c1 c2 check
+permutation w3 r1 r2 p3 w2 c3 p2 p1 c2 c1 check
+permutation w3 r1 r2 p3 w2 c3 p2 c2 p1 c1 check
+permutation w3 r1 r2 p3 p1 w2 p2 c3 c1 c2 check
+permutation w3 r1 r2 p3 p1 w2 p2 c3 c2 c1 check
+permutation w3 r1 r2 p3 p1 w2 c3 p2 c1 c2 check
+permutation w3 r1 r2 p3 p1 w2 c3 p2 c2 c1 check
+permutation w3 r1 r2 p3 p1 w2 c3 c1 p2 c2 check
+permutation w3 r1 r2 p3 p1 c3 w2 p2 c1 c2 check
+permutation w3 r1 r2 p3 p1 c3 w2 p2 c2 c1 check
+permutation w3 r1 r2 p3 p1 c3 w2 c1 p2 c2 check
+permutation w3 r1 r2 p3 p1 c3 c1 w2 p2 c2 check
+permutation w3 r1 r2 p3 c3 w2 p1 p2 c1 c2 check
+permutation w3 r1 r2 p3 c3 w2 p1 p2 c2 c1 check
+permutation w3 r1 r2 p3 c3 w2 p1 c1 p2 c2 check
+permutation w3 r1 r2 p3 c3 w2 p2 p1 c1 c2 check
+permutation w3 r1 r2 p3 c3 w2 p2 p1 c2 c1 check
+permutation w3 r1 r2 p3 c3 w2 p2 c2 p1 c1 check
+permutation w3 r1 r2 p3 c3 p1 w2 p2 c1 c2 check
+permutation w3 r1 r2 p3 c3 p1 w2 p2 c2 c1 check
+permutation w3 r1 r2 p3 c3 p1 w2 c1 p2 c2 check
+permutation w3 r1 r2 p3 c3 p1 c1 w2 p2 c2 check
+permutation w3 r1 w2 r2 p1 p2 p3 c3 c1 c2 check
+permutation w3 r1 w2 r2 p1 p2 p3 c3 c2 c1 check
+permutation w3 r1 w2 r2 p1 p3 p2 c3 c1 c2 check
+permutation w3 r1 w2 r2 p1 p3 p2 c3 c2 c1 check
+permutation w3 r1 w2 r2 p1 p3 c3 p2 c1 c2 check
+permutation w3 r1 w2 r2 p1 p3 c3 p2 c2 c1 check
+permutation w3 r1 w2 r2 p1 p3 c3 c1 p2 c2 check
+permutation w3 r1 w2 r2 p2 p1 p3 c3 c1 c2 check
+permutation w3 r1 w2 r2 p2 p1 p3 c3 c2 c1 check
+permutation w3 r1 w2 r2 p2 p3 p1 c3 c1 c2 check
+permutation w3 r1 w2 r2 p2 p3 p1 c3 c2 c1 check
+permutation w3 r1 w2 r2 p2 p3 c3 p1 c1 c2 check
+permutation w3 r1 w2 r2 p2 p3 c3 p1 c2 c1 check
+permutation w3 r1 w2 r2 p2 p3 c3 c2 p1 c1 check
+permutation w3 r1 w2 r2 p3 p1 p2 c3 c1 c2 check
+permutation w3 r1 w2 r2 p3 p1 p2 c3 c2 c1 check
+permutation w3 r1 w2 r2 p3 p1 c3 p2 c1 c2 check
+permutation w3 r1 w2 r2 p3 p1 c3 p2 c2 c1 check
+permutation w3 r1 w2 r2 p3 p1 c3 c1 p2 c2 check
+permutation w3 r1 w2 r2 p3 p2 p1 c3 c1 c2 check
+permutation w3 r1 w2 r2 p3 p2 p1 c3 c2 c1 check
+permutation w3 r1 w2 r2 p3 p2 c3 p1 c1 c2 check
+permutation w3 r1 w2 r2 p3 p2 c3 p1 c2 c1 check
+permutation w3 r1 w2 r2 p3 p2 c3 c2 p1 c1 check
+permutation w3 r1 w2 r2 p3 c3 p1 p2 c1 c2 check
+permutation w3 r1 w2 r2 p3 c3 p1 p2 c2 c1 check
+permutation w3 r1 w2 r2 p3 c3 p1 c1 p2 c2 check
+permutation w3 r1 w2 r2 p3 c3 p2 p1 c1 c2 check
+permutation w3 r1 w2 r2 p3 c3 p2 p1 c2 c1 check
+permutation w3 r1 w2 r2 p3 c3 p2 c2 p1 c1 check
+permutation w3 r1 w2 p1 r2 p2 p3 c3 c1 c2 check
+permutation w3 r1 w2 p1 r2 p2 p3 c3 c2 c1 check
+permutation w3 r1 w2 p1 r2 p3 p2 c3 c1 c2 check
+permutation w3 r1 w2 p1 r2 p3 p2 c3 c2 c1 check
+permutation w3 r1 w2 p1 r2 p3 c3 p2 c1 c2 check
+permutation w3 r1 w2 p1 r2 p3 c3 p2 c2 c1 check
+permutation w3 r1 w2 p1 r2 p3 c3 c1 p2 c2 check
+permutation w3 r1 w2 p1 p3 r2 p2 c3 c1 c2 check
+permutation w3 r1 w2 p1 p3 r2 p2 c3 c2 c1 check
+permutation w3 r1 w2 p1 p3 r2 c3 p2 c1 c2 check
+permutation w3 r1 w2 p1 p3 r2 c3 p2 c2 c1 check
+permutation w3 r1 w2 p1 p3 r2 c3 c1 p2 c2 check
+permutation w3 r1 w2 p1 p3 c3 r2 p2 c1 c2 check
+permutation w3 r1 w2 p1 p3 c3 r2 p2 c2 c1 check
+permutation w3 r1 w2 p1 p3 c3 r2 c1 p2 c2 check
+permutation w3 r1 w2 p1 p3 c3 c1 r2 p2 c2 check
+permutation w3 r1 w2 p3 r2 p1 p2 c3 c1 c2 check
+permutation w3 r1 w2 p3 r2 p1 p2 c3 c2 c1 check
+permutation w3 r1 w2 p3 r2 p1 c3 p2 c1 c2 check
+permutation w3 r1 w2 p3 r2 p1 c3 p2 c2 c1 check
+permutation w3 r1 w2 p3 r2 p1 c3 c1 p2 c2 check
+permutation w3 r1 w2 p3 r2 p2 p1 c3 c1 c2 check
+permutation w3 r1 w2 p3 r2 p2 p1 c3 c2 c1 check
+permutation w3 r1 w2 p3 r2 p2 c3 p1 c1 c2 check
+permutation w3 r1 w2 p3 r2 p2 c3 p1 c2 c1 check
+permutation w3 r1 w2 p3 r2 p2 c3 c2 p1 c1 check
+permutation w3 r1 w2 p3 r2 c3 p1 p2 c1 c2 check
+permutation w3 r1 w2 p3 r2 c3 p1 p2 c2 c1 check
+permutation w3 r1 w2 p3 r2 c3 p1 c1 p2 c2 check
+permutation w3 r1 w2 p3 r2 c3 p2 p1 c1 c2 check
+permutation w3 r1 w2 p3 r2 c3 p2 p1 c2 c1 check
+permutation w3 r1 w2 p3 r2 c3 p2 c2 p1 c1 check
+permutation w3 r1 w2 p3 p1 r2 p2 c3 c1 c2 check
+permutation w3 r1 w2 p3 p1 r2 p2 c3 c2 c1 check
+permutation w3 r1 w2 p3 p1 r2 c3 p2 c1 c2 check
+permutation w3 r1 w2 p3 p1 r2 c3 p2 c2 c1 check
+permutation w3 r1 w2 p3 p1 r2 c3 c1 p2 c2 check
+permutation w3 r1 w2 p3 p1 c3 r2 p2 c1 c2 check
+permutation w3 r1 w2 p3 p1 c3 r2 p2 c2 c1 check
+permutation w3 r1 w2 p3 p1 c3 r2 c1 p2 c2 check
+permutation w3 r1 w2 p3 p1 c3 c1 r2 p2 c2 check
+permutation w3 r1 w2 p3 c3 r2 p1 p2 c1 c2 check
+permutation w3 r1 w2 p3 c3 r2 p1 p2 c2 c1 check
+permutation w3 r1 w2 p3 c3 r2 p1 c1 p2 c2 check
+permutation w3 r1 w2 p3 c3 r2 p2 p1 c1 c2 check
+permutation w3 r1 w2 p3 c3 r2 p2 p1 c2 c1 check
+permutation w3 r1 w2 p3 c3 r2 p2 c2 p1 c1 check
+permutation w3 r1 w2 p3 c3 p1 r2 p2 c1 c2 check
+permutation w3 r1 w2 p3 c3 p1 r2 p2 c2 c1 check
+permutation w3 r1 w2 p3 c3 p1 r2 c1 p2 c2 check
+permutation w3 r1 w2 p3 c3 p1 c1 r2 p2 c2 check
+permutation w3 r1 p1 r2 w2 p2 p3 c3 c1 c2 check
+permutation w3 r1 p1 r2 w2 p2 p3 c3 c2 c1 check
+permutation w3 r1 p1 r2 w2 p3 p2 c3 c1 c2 check
+permutation w3 r1 p1 r2 w2 p3 p2 c3 c2 c1 check
+permutation w3 r1 p1 r2 w2 p3 c3 p2 c1 c2 check
+permutation w3 r1 p1 r2 w2 p3 c3 p2 c2 c1 check
+permutation w3 r1 p1 r2 w2 p3 c3 c1 p2 c2 check
+permutation w3 r1 p1 r2 p3 w2 p2 c3 c1 c2 check
+permutation w3 r1 p1 r2 p3 w2 p2 c3 c2 c1 check
+permutation w3 r1 p1 r2 p3 w2 c3 p2 c1 c2 check
+permutation w3 r1 p1 r2 p3 w2 c3 p2 c2 c1 check
+permutation w3 r1 p1 r2 p3 w2 c3 c1 p2 c2 check
+permutation w3 r1 p1 r2 p3 c3 w2 p2 c1 c2 check
+permutation w3 r1 p1 r2 p3 c3 w2 p2 c2 c1 check
+permutation w3 r1 p1 r2 p3 c3 w2 c1 p2 c2 check
+permutation w3 r1 p1 r2 p3 c3 c1 w2 p2 c2 check
+permutation w3 r1 p1 w2 r2 p2 p3 c3 c1 c2 check
+permutation w3 r1 p1 w2 r2 p2 p3 c3 c2 c1 check
+permutation w3 r1 p1 w2 r2 p3 p2 c3 c1 c2 check
+permutation w3 r1 p1 w2 r2 p3 p2 c3 c2 c1 check
+permutation w3 r1 p1 w2 r2 p3 c3 p2 c1 c2 check
+permutation w3 r1 p1 w2 r2 p3 c3 p2 c2 c1 check
+permutation w3 r1 p1 w2 r2 p3 c3 c1 p2 c2 check
+permutation w3 r1 p1 w2 p3 r2 p2 c3 c1 c2 check
+permutation w3 r1 p1 w2 p3 r2 p2 c3 c2 c1 check
+permutation w3 r1 p1 w2 p3 r2 c3 p2 c1 c2 check
+permutation w3 r1 p1 w2 p3 r2 c3 p2 c2 c1 check
+permutation w3 r1 p1 w2 p3 r2 c3 c1 p2 c2 check
+permutation w3 r1 p1 w2 p3 c3 r2 p2 c1 c2 check
+permutation w3 r1 p1 w2 p3 c3 r2 p2 c2 c1 check
+permutation w3 r1 p1 w2 p3 c3 r2 c1 p2 c2 check
+permutation w3 r1 p1 w2 p3 c3 c1 r2 p2 c2 check
+permutation w3 r1 p1 p3 r2 w2 p2 c3 c1 c2 check
+permutation w3 r1 p1 p3 r2 w2 p2 c3 c2 c1 check
+permutation w3 r1 p1 p3 r2 w2 c3 p2 c1 c2 check
+permutation w3 r1 p1 p3 r2 w2 c3 p2 c2 c1 check
+permutation w3 r1 p1 p3 r2 w2 c3 c1 p2 c2 check
+permutation w3 r1 p1 p3 r2 c3 w2 p2 c1 c2 check
+permutation w3 r1 p1 p3 r2 c3 w2 p2 c2 c1 check
+permutation w3 r1 p1 p3 r2 c3 w2 c1 p2 c2 check
+permutation w3 r1 p1 p3 r2 c3 c1 w2 p2 c2 check
+permutation w3 r1 p1 p3 w2 r2 p2 c3 c1 c2 check
+permutation w3 r1 p1 p3 w2 r2 p2 c3 c2 c1 check
+permutation w3 r1 p1 p3 w2 r2 c3 p2 c1 c2 check
+permutation w3 r1 p1 p3 w2 r2 c3 p2 c2 c1 check
+permutation w3 r1 p1 p3 w2 r2 c3 c1 p2 c2 check
+permutation w3 r1 p1 p3 w2 c3 r2 p2 c1 c2 check
+permutation w3 r1 p1 p3 w2 c3 r2 p2 c2 c1 check
+permutation w3 r1 p1 p3 w2 c3 r2 c1 p2 c2 check
+permutation w3 r1 p1 p3 w2 c3 c1 r2 p2 c2 check
+permutation w3 r1 p1 p3 c3 r2 w2 p2 c1 c2 check
+permutation w3 r1 p1 p3 c3 r2 w2 p2 c2 c1 check
+permutation w3 r1 p1 p3 c3 r2 w2 c1 p2 c2 check
+permutation w3 r1 p1 p3 c3 r2 c1 w2 p2 c2 check
+permutation w3 r1 p1 p3 c3 w2 r2 p2 c1 c2 check
+permutation w3 r1 p1 p3 c3 w2 r2 p2 c2 c1 check
+permutation w3 r1 p1 p3 c3 w2 r2 c1 p2 c2 check
+permutation w3 r1 p1 p3 c3 w2 c1 r2 p2 c2 check
+permutation w3 r1 p1 p3 c3 c1 r2 w2 p2 c2 check
+permutation w3 r1 p1 p3 c3 c1 w2 r2 p2 c2 check
+permutation w3 r1 p3 r2 w2 p1 p2 c3 c1 c2 check
+permutation w3 r1 p3 r2 w2 p1 p2 c3 c2 c1 check
+permutation w3 r1 p3 r2 w2 p1 c3 p2 c1 c2 check
+permutation w3 r1 p3 r2 w2 p1 c3 p2 c2 c1 check
+permutation w3 r1 p3 r2 w2 p1 c3 c1 p2 c2 check
+permutation w3 r1 p3 r2 w2 p2 p1 c3 c1 c2 check
+permutation w3 r1 p3 r2 w2 p2 p1 c3 c2 c1 check
+permutation w3 r1 p3 r2 w2 p2 c3 p1 c1 c2 check
+permutation w3 r1 p3 r2 w2 p2 c3 p1 c2 c1 check
+permutation w3 r1 p3 r2 w2 p2 c3 c2 p1 c1 check
+permutation w3 r1 p3 r2 w2 c3 p1 p2 c1 c2 check
+permutation w3 r1 p3 r2 w2 c3 p1 p2 c2 c1 check
+permutation w3 r1 p3 r2 w2 c3 p1 c1 p2 c2 check
+permutation w3 r1 p3 r2 w2 c3 p2 p1 c1 c2 check
+permutation w3 r1 p3 r2 w2 c3 p2 p1 c2 c1 check
+permutation w3 r1 p3 r2 w2 c3 p2 c2 p1 c1 check
+permutation w3 r1 p3 r2 p1 w2 p2 c3 c1 c2 check
+permutation w3 r1 p3 r2 p1 w2 p2 c3 c2 c1 check
+permutation w3 r1 p3 r2 p1 w2 c3 p2 c1 c2 check
+permutation w3 r1 p3 r2 p1 w2 c3 p2 c2 c1 check
+permutation w3 r1 p3 r2 p1 w2 c3 c1 p2 c2 check
+permutation w3 r1 p3 r2 p1 c3 w2 p2 c1 c2 check
+permutation w3 r1 p3 r2 p1 c3 w2 p2 c2 c1 check
+permutation w3 r1 p3 r2 p1 c3 w2 c1 p2 c2 check
+permutation w3 r1 p3 r2 p1 c3 c1 w2 p2 c2 check
+permutation w3 r1 p3 r2 c3 w2 p1 p2 c1 c2 check
+permutation w3 r1 p3 r2 c3 w2 p1 p2 c2 c1 check
+permutation w3 r1 p3 r2 c3 w2 p1 c1 p2 c2 check
+permutation w3 r1 p3 r2 c3 w2 p2 p1 c1 c2 check
+permutation w3 r1 p3 r2 c3 w2 p2 p1 c2 c1 check
+permutation w3 r1 p3 r2 c3 w2 p2 c2 p1 c1 check
+permutation w3 r1 p3 r2 c3 p1 w2 p2 c1 c2 check
+permutation w3 r1 p3 r2 c3 p1 w2 p2 c2 c1 check
+permutation w3 r1 p3 r2 c3 p1 w2 c1 p2 c2 check
+permutation w3 r1 p3 r2 c3 p1 c1 w2 p2 c2 check
+permutation w3 r1 p3 w2 r2 p1 p2 c3 c1 c2 check
+permutation w3 r1 p3 w2 r2 p1 p2 c3 c2 c1 check
+permutation w3 r1 p3 w2 r2 p1 c3 p2 c1 c2 check
+permutation w3 r1 p3 w2 r2 p1 c3 p2 c2 c1 check
+permutation w3 r1 p3 w2 r2 p1 c3 c1 p2 c2 check
+permutation w3 r1 p3 w2 r2 p2 p1 c3 c1 c2 check
+permutation w3 r1 p3 w2 r2 p2 p1 c3 c2 c1 check
+permutation w3 r1 p3 w2 r2 p2 c3 p1 c1 c2 check
+permutation w3 r1 p3 w2 r2 p2 c3 p1 c2 c1 check
+permutation w3 r1 p3 w2 r2 p2 c3 c2 p1 c1 check
+permutation w3 r1 p3 w2 r2 c3 p1 p2 c1 c2 check
+permutation w3 r1 p3 w2 r2 c3 p1 p2 c2 c1 check
+permutation w3 r1 p3 w2 r2 c3 p1 c1 p2 c2 check
+permutation w3 r1 p3 w2 r2 c3 p2 p1 c1 c2 check
+permutation w3 r1 p3 w2 r2 c3 p2 p1 c2 c1 check
+permutation w3 r1 p3 w2 r2 c3 p2 c2 p1 c1 check
+permutation w3 r1 p3 w2 p1 r2 p2 c3 c1 c2 check
+permutation w3 r1 p3 w2 p1 r2 p2 c3 c2 c1 check
+permutation w3 r1 p3 w2 p1 r2 c3 p2 c1 c2 check
+permutation w3 r1 p3 w2 p1 r2 c3 p2 c2 c1 check
+permutation w3 r1 p3 w2 p1 r2 c3 c1 p2 c2 check
+permutation w3 r1 p3 w2 p1 c3 r2 p2 c1 c2 check
+permutation w3 r1 p3 w2 p1 c3 r2 p2 c2 c1 check
+permutation w3 r1 p3 w2 p1 c3 r2 c1 p2 c2 check
+permutation w3 r1 p3 w2 p1 c3 c1 r2 p2 c2 check
+permutation w3 r1 p3 w2 c3 r2 p1 p2 c1 c2 check
+permutation w3 r1 p3 w2 c3 r2 p1 p2 c2 c1 check
+permutation w3 r1 p3 w2 c3 r2 p1 c1 p2 c2 check
+permutation w3 r1 p3 w2 c3 r2 p2 p1 c1 c2 check
+permutation w3 r1 p3 w2 c3 r2 p2 p1 c2 c1 check
+permutation w3 r1 p3 w2 c3 r2 p2 c2 p1 c1 check
+permutation w3 r1 p3 w2 c3 p1 r2 p2 c1 c2 check
+permutation w3 r1 p3 w2 c3 p1 r2 p2 c2 c1 check
+permutation w3 r1 p3 w2 c3 p1 r2 c1 p2 c2 check
+permutation w3 r1 p3 w2 c3 p1 c1 r2 p2 c2 check
+permutation w3 r1 p3 p1 r2 w2 p2 c3 c1 c2 check
+permutation w3 r1 p3 p1 r2 w2 p2 c3 c2 c1 check
+permutation w3 r1 p3 p1 r2 w2 c3 p2 c1 c2 check
+permutation w3 r1 p3 p1 r2 w2 c3 p2 c2 c1 check
+permutation w3 r1 p3 p1 r2 w2 c3 c1 p2 c2 check
+permutation w3 r1 p3 p1 r2 c3 w2 p2 c1 c2 check
+permutation w3 r1 p3 p1 r2 c3 w2 p2 c2 c1 check
+permutation w3 r1 p3 p1 r2 c3 w2 c1 p2 c2 check
+permutation w3 r1 p3 p1 r2 c3 c1 w2 p2 c2 check
+permutation w3 r1 p3 p1 w2 r2 p2 c3 c1 c2 check
+permutation w3 r1 p3 p1 w2 r2 p2 c3 c2 c1 check
+permutation w3 r1 p3 p1 w2 r2 c3 p2 c1 c2 check
+permutation w3 r1 p3 p1 w2 r2 c3 p2 c2 c1 check
+permutation w3 r1 p3 p1 w2 r2 c3 c1 p2 c2 check
+permutation w3 r1 p3 p1 w2 c3 r2 p2 c1 c2 check
+permutation w3 r1 p3 p1 w2 c3 r2 p2 c2 c1 check
+permutation w3 r1 p3 p1 w2 c3 r2 c1 p2 c2 check
+permutation w3 r1 p3 p1 w2 c3 c1 r2 p2 c2 check
+permutation w3 r1 p3 p1 c3 r2 w2 p2 c1 c2 check
+permutation w3 r1 p3 p1 c3 r2 w2 p2 c2 c1 check
+permutation w3 r1 p3 p1 c3 r2 w2 c1 p2 c2 check
+permutation w3 r1 p3 p1 c3 r2 c1 w2 p2 c2 check
+permutation w3 r1 p3 p1 c3 w2 r2 p2 c1 c2 check
+permutation w3 r1 p3 p1 c3 w2 r2 p2 c2 c1 check
+permutation w3 r1 p3 p1 c3 w2 r2 c1 p2 c2 check
+permutation w3 r1 p3 p1 c3 w2 c1 r2 p2 c2 check
+permutation w3 r1 p3 p1 c3 c1 r2 w2 p2 c2 check
+permutation w3 r1 p3 p1 c3 c1 w2 r2 p2 c2 check
+permutation w3 r1 p3 c3 r2 w2 p1 p2 c1 c2 check
+permutation w3 r1 p3 c3 r2 w2 p1 p2 c2 c1 check
+permutation w3 r1 p3 c3 r2 w2 p1 c1 p2 c2 check
+permutation w3 r1 p3 c3 r2 w2 p2 p1 c1 c2 check
+permutation w3 r1 p3 c3 r2 w2 p2 p1 c2 c1 check
+permutation w3 r1 p3 c3 r2 w2 p2 c2 p1 c1 check
+permutation w3 r1 p3 c3 r2 p1 w2 p2 c1 c2 check
+permutation w3 r1 p3 c3 r2 p1 w2 p2 c2 c1 check
+permutation w3 r1 p3 c3 r2 p1 w2 c1 p2 c2 check
+permutation w3 r1 p3 c3 r2 p1 c1 w2 p2 c2 check
+permutation w3 r1 p3 c3 w2 r2 p1 p2 c1 c2 check
+permutation w3 r1 p3 c3 w2 r2 p1 p2 c2 c1 check
+permutation w3 r1 p3 c3 w2 r2 p1 c1 p2 c2 check
+permutation w3 r1 p3 c3 w2 r2 p2 p1 c1 c2 check
+permutation w3 r1 p3 c3 w2 r2 p2 p1 c2 c1 check
+permutation w3 r1 p3 c3 w2 r2 p2 c2 p1 c1 check
+permutation w3 r1 p3 c3 w2 p1 r2 p2 c1 c2 check
+permutation w3 r1 p3 c3 w2 p1 r2 p2 c2 c1 check
+permutation w3 r1 p3 c3 w2 p1 r2 c1 p2 c2 check
+permutation w3 r1 p3 c3 w2 p1 c1 r2 p2 c2 check
+permutation w3 r1 p3 c3 p1 r2 w2 p2 c1 c2 check
+permutation w3 r1 p3 c3 p1 r2 w2 p2 c2 c1 check
+permutation w3 r1 p3 c3 p1 r2 w2 c1 p2 c2 check
+permutation w3 r1 p3 c3 p1 r2 c1 w2 p2 c2 check
+permutation w3 r1 p3 c3 p1 w2 r2 p2 c1 c2 check
+permutation w3 r1 p3 c3 p1 w2 r2 p2 c2 c1 check
+permutation w3 r1 p3 c3 p1 w2 r2 c1 p2 c2 check
+permutation w3 r1 p3 c3 p1 w2 c1 r2 p2 c2 check
+permutation w3 r1 p3 c3 p1 c1 r2 w2 p2 c2 check
+permutation w3 r1 p3 c3 p1 c1 w2 r2 p2 c2 check
+permutation w3 r2 r1 w2 p1 p2 p3 c3 c1 c2 check
+permutation w3 r2 r1 w2 p1 p2 p3 c3 c2 c1 check
+permutation w3 r2 r1 w2 p1 p3 p2 c3 c1 c2 check
+permutation w3 r2 r1 w2 p1 p3 p2 c3 c2 c1 check
+permutation w3 r2 r1 w2 p1 p3 c3 p2 c1 c2 check
+permutation w3 r2 r1 w2 p1 p3 c3 p2 c2 c1 check
+permutation w3 r2 r1 w2 p1 p3 c3 c1 p2 c2 check
+permutation w3 r2 r1 w2 p2 p1 p3 c3 c1 c2 check
+permutation w3 r2 r1 w2 p2 p1 p3 c3 c2 c1 check
+permutation w3 r2 r1 w2 p2 p3 p1 c3 c1 c2 check
+permutation w3 r2 r1 w2 p2 p3 p1 c3 c2 c1 check
+permutation w3 r2 r1 w2 p2 p3 c3 p1 c1 c2 check
+permutation w3 r2 r1 w2 p2 p3 c3 p1 c2 c1 check
+permutation w3 r2 r1 w2 p2 p3 c3 c2 p1 c1 check
+permutation w3 r2 r1 w2 p3 p1 p2 c3 c1 c2 check
+permutation w3 r2 r1 w2 p3 p1 p2 c3 c2 c1 check
+permutation w3 r2 r1 w2 p3 p1 c3 p2 c1 c2 check
+permutation w3 r2 r1 w2 p3 p1 c3 p2 c2 c1 check
+permutation w3 r2 r1 w2 p3 p1 c3 c1 p2 c2 check
+permutation w3 r2 r1 w2 p3 p2 p1 c3 c1 c2 check
+permutation w3 r2 r1 w2 p3 p2 p1 c3 c2 c1 check
+permutation w3 r2 r1 w2 p3 p2 c3 p1 c1 c2 check
+permutation w3 r2 r1 w2 p3 p2 c3 p1 c2 c1 check
+permutation w3 r2 r1 w2 p3 p2 c3 c2 p1 c1 check
+permutation w3 r2 r1 w2 p3 c3 p1 p2 c1 c2 check
+permutation w3 r2 r1 w2 p3 c3 p1 p2 c2 c1 check
+permutation w3 r2 r1 w2 p3 c3 p1 c1 p2 c2 check
+permutation w3 r2 r1 w2 p3 c3 p2 p1 c1 c2 check
+permutation w3 r2 r1 w2 p3 c3 p2 p1 c2 c1 check
+permutation w3 r2 r1 w2 p3 c3 p2 c2 p1 c1 check
+permutation w3 r2 r1 p1 w2 p2 p3 c3 c1 c2 check
+permutation w3 r2 r1 p1 w2 p2 p3 c3 c2 c1 check
+permutation w3 r2 r1 p1 w2 p3 p2 c3 c1 c2 check
+permutation w3 r2 r1 p1 w2 p3 p2 c3 c2 c1 check
+permutation w3 r2 r1 p1 w2 p3 c3 p2 c1 c2 check
+permutation w3 r2 r1 p1 w2 p3 c3 p2 c2 c1 check
+permutation w3 r2 r1 p1 w2 p3 c3 c1 p2 c2 check
+permutation w3 r2 r1 p1 p3 w2 p2 c3 c1 c2 check
+permutation w3 r2 r1 p1 p3 w2 p2 c3 c2 c1 check
+permutation w3 r2 r1 p1 p3 w2 c3 p2 c1 c2 check
+permutation w3 r2 r1 p1 p3 w2 c3 p2 c2 c1 check
+permutation w3 r2 r1 p1 p3 w2 c3 c1 p2 c2 check
+permutation w3 r2 r1 p1 p3 c3 w2 p2 c1 c2 check
+permutation w3 r2 r1 p1 p3 c3 w2 p2 c2 c1 check
+permutation w3 r2 r1 p1 p3 c3 w2 c1 p2 c2 check
+permutation w3 r2 r1 p1 p3 c3 c1 w2 p2 c2 check
+permutation w3 r2 r1 p3 w2 p1 p2 c3 c1 c2 check
+permutation w3 r2 r1 p3 w2 p1 p2 c3 c2 c1 check
+permutation w3 r2 r1 p3 w2 p1 c3 p2 c1 c2 check
+permutation w3 r2 r1 p3 w2 p1 c3 p2 c2 c1 check
+permutation w3 r2 r1 p3 w2 p1 c3 c1 p2 c2 check
+permutation w3 r2 r1 p3 w2 p2 p1 c3 c1 c2 check
+permutation w3 r2 r1 p3 w2 p2 p1 c3 c2 c1 check
+permutation w3 r2 r1 p3 w2 p2 c3 p1 c1 c2 check
+permutation w3 r2 r1 p3 w2 p2 c3 p1 c2 c1 check
+permutation w3 r2 r1 p3 w2 p2 c3 c2 p1 c1 check
+permutation w3 r2 r1 p3 w2 c3 p1 p2 c1 c2 check
+permutation w3 r2 r1 p3 w2 c3 p1 p2 c2 c1 check
+permutation w3 r2 r1 p3 w2 c3 p1 c1 p2 c2 check
+permutation w3 r2 r1 p3 w2 c3 p2 p1 c1 c2 check
+permutation w3 r2 r1 p3 w2 c3 p2 p1 c2 c1 check
+permutation w3 r2 r1 p3 w2 c3 p2 c2 p1 c1 check
+permutation w3 r2 r1 p3 p1 w2 p2 c3 c1 c2 check
+permutation w3 r2 r1 p3 p1 w2 p2 c3 c2 c1 check
+permutation w3 r2 r1 p3 p1 w2 c3 p2 c1 c2 check
+permutation w3 r2 r1 p3 p1 w2 c3 p2 c2 c1 check
+permutation w3 r2 r1 p3 p1 w2 c3 c1 p2 c2 check
+permutation w3 r2 r1 p3 p1 c3 w2 p2 c1 c2 check
+permutation w3 r2 r1 p3 p1 c3 w2 p2 c2 c1 check
+permutation w3 r2 r1 p3 p1 c3 w2 c1 p2 c2 check
+permutation w3 r2 r1 p3 p1 c3 c1 w2 p2 c2 check
+permutation w3 r2 r1 p3 c3 w2 p1 p2 c1 c2 check
+permutation w3 r2 r1 p3 c3 w2 p1 p2 c2 c1 check
+permutation w3 r2 r1 p3 c3 w2 p1 c1 p2 c2 check
+permutation w3 r2 r1 p3 c3 w2 p2 p1 c1 c2 check
+permutation w3 r2 r1 p3 c3 w2 p2 p1 c2 c1 check
+permutation w3 r2 r1 p3 c3 w2 p2 c2 p1 c1 check
+permutation w3 r2 r1 p3 c3 p1 w2 p2 c1 c2 check
+permutation w3 r2 r1 p3 c3 p1 w2 p2 c2 c1 check
+permutation w3 r2 r1 p3 c3 p1 w2 c1 p2 c2 check
+permutation w3 r2 r1 p3 c3 p1 c1 w2 p2 c2 check
+permutation w3 r2 p3 r1 w2 p1 p2 c3 c1 c2 check
+permutation w3 r2 p3 r1 w2 p1 p2 c3 c2 c1 check
+permutation w3 r2 p3 r1 w2 p1 c3 p2 c1 c2 check
+permutation w3 r2 p3 r1 w2 p1 c3 p2 c2 c1 check
+permutation w3 r2 p3 r1 w2 p1 c3 c1 p2 c2 check
+permutation w3 r2 p3 r1 w2 p2 p1 c3 c1 c2 check
+permutation w3 r2 p3 r1 w2 p2 p1 c3 c2 c1 check
+permutation w3 r2 p3 r1 w2 p2 c3 p1 c1 c2 check
+permutation w3 r2 p3 r1 w2 p2 c3 p1 c2 c1 check
+permutation w3 r2 p3 r1 w2 p2 c3 c2 p1 c1 check
+permutation w3 r2 p3 r1 w2 c3 p1 p2 c1 c2 check
+permutation w3 r2 p3 r1 w2 c3 p1 p2 c2 c1 check
+permutation w3 r2 p3 r1 w2 c3 p1 c1 p2 c2 check
+permutation w3 r2 p3 r1 w2 c3 p2 p1 c1 c2 check
+permutation w3 r2 p3 r1 w2 c3 p2 p1 c2 c1 check
+permutation w3 r2 p3 r1 w2 c3 p2 c2 p1 c1 check
+permutation w3 r2 p3 r1 p1 w2 p2 c3 c1 c2 check
+permutation w3 r2 p3 r1 p1 w2 p2 c3 c2 c1 check
+permutation w3 r2 p3 r1 p1 w2 c3 p2 c1 c2 check
+permutation w3 r2 p3 r1 p1 w2 c3 p2 c2 c1 check
+permutation w3 r2 p3 r1 p1 w2 c3 c1 p2 c2 check
+permutation w3 r2 p3 r1 p1 c3 w2 p2 c1 c2 check
+permutation w3 r2 p3 r1 p1 c3 w2 p2 c2 c1 check
+permutation w3 r2 p3 r1 p1 c3 w2 c1 p2 c2 check
+permutation w3 r2 p3 r1 p1 c3 c1 w2 p2 c2 check
+permutation w3 r2 p3 r1 c3 w2 p1 p2 c1 c2 check
+permutation w3 r2 p3 r1 c3 w2 p1 p2 c2 c1 check
+permutation w3 r2 p3 r1 c3 w2 p1 c1 p2 c2 check
+permutation w3 r2 p3 r1 c3 w2 p2 p1 c1 c2 check
+permutation w3 r2 p3 r1 c3 w2 p2 p1 c2 c1 check
+permutation w3 r2 p3 r1 c3 w2 p2 c2 p1 c1 check
+permutation w3 r2 p3 r1 c3 p1 w2 p2 c1 c2 check
+permutation w3 r2 p3 r1 c3 p1 w2 p2 c2 c1 check
+permutation w3 r2 p3 r1 c3 p1 w2 c1 p2 c2 check
+permutation w3 r2 p3 r1 c3 p1 c1 w2 p2 c2 check
+permutation w3 r2 p3 c3 r1 w2 p1 p2 c1 c2 check
+permutation w3 r2 p3 c3 r1 w2 p1 p2 c2 c1 check
+permutation w3 r2 p3 c3 r1 w2 p1 c1 p2 c2 check
+permutation w3 r2 p3 c3 r1 w2 p2 p1 c1 c2 check
+permutation w3 r2 p3 c3 r1 w2 p2 p1 c2 c1 check
+permutation w3 r2 p3 c3 r1 w2 p2 c2 p1 c1 check
+permutation w3 r2 p3 c3 r1 p1 w2 p2 c1 c2 check
+permutation w3 r2 p3 c3 r1 p1 w2 p2 c2 c1 check
+permutation w3 r2 p3 c3 r1 p1 w2 c1 p2 c2 check
+permutation w3 r2 p3 c3 r1 p1 c1 w2 p2 c2 check
+permutation w3 p3 r1 r2 w2 p1 p2 c3 c1 c2 check
+permutation w3 p3 r1 r2 w2 p1 p2 c3 c2 c1 check
+permutation w3 p3 r1 r2 w2 p1 c3 p2 c1 c2 check
+permutation w3 p3 r1 r2 w2 p1 c3 p2 c2 c1 check
+permutation w3 p3 r1 r2 w2 p1 c3 c1 p2 c2 check
+permutation w3 p3 r1 r2 w2 p2 p1 c3 c1 c2 check
+permutation w3 p3 r1 r2 w2 p2 p1 c3 c2 c1 check
+permutation w3 p3 r1 r2 w2 p2 c3 p1 c1 c2 check
+permutation w3 p3 r1 r2 w2 p2 c3 p1 c2 c1 check
+permutation w3 p3 r1 r2 w2 p2 c3 c2 p1 c1 check
+permutation w3 p3 r1 r2 w2 c3 p1 p2 c1 c2 check
+permutation w3 p3 r1 r2 w2 c3 p1 p2 c2 c1 check
+permutation w3 p3 r1 r2 w2 c3 p1 c1 p2 c2 check
+permutation w3 p3 r1 r2 w2 c3 p2 p1 c1 c2 check
+permutation w3 p3 r1 r2 w2 c3 p2 p1 c2 c1 check
+permutation w3 p3 r1 r2 w2 c3 p2 c2 p1 c1 check
+permutation w3 p3 r1 r2 p1 w2 p2 c3 c1 c2 check
+permutation w3 p3 r1 r2 p1 w2 p2 c3 c2 c1 check
+permutation w3 p3 r1 r2 p1 w2 c3 p2 c1 c2 check
+permutation w3 p3 r1 r2 p1 w2 c3 p2 c2 c1 check
+permutation w3 p3 r1 r2 p1 w2 c3 c1 p2 c2 check
+permutation w3 p3 r1 r2 p1 c3 w2 p2 c1 c2 check
+permutation w3 p3 r1 r2 p1 c3 w2 p2 c2 c1 check
+permutation w3 p3 r1 r2 p1 c3 w2 c1 p2 c2 check
+permutation w3 p3 r1 r2 p1 c3 c1 w2 p2 c2 check
+permutation w3 p3 r1 r2 c3 w2 p1 p2 c1 c2 check
+permutation w3 p3 r1 r2 c3 w2 p1 p2 c2 c1 check
+permutation w3 p3 r1 r2 c3 w2 p1 c1 p2 c2 check
+permutation w3 p3 r1 r2 c3 w2 p2 p1 c1 c2 check
+permutation w3 p3 r1 r2 c3 w2 p2 p1 c2 c1 check
+permutation w3 p3 r1 r2 c3 w2 p2 c2 p1 c1 check
+permutation w3 p3 r1 r2 c3 p1 w2 p2 c1 c2 check
+permutation w3 p3 r1 r2 c3 p1 w2 p2 c2 c1 check
+permutation w3 p3 r1 r2 c3 p1 w2 c1 p2 c2 check
+permutation w3 p3 r1 r2 c3 p1 c1 w2 p2 c2 check
+permutation w3 p3 r1 w2 r2 p1 p2 c3 c1 c2 check
+permutation w3 p3 r1 w2 r2 p1 p2 c3 c2 c1 check
+permutation w3 p3 r1 w2 r2 p1 c3 p2 c1 c2 check
+permutation w3 p3 r1 w2 r2 p1 c3 p2 c2 c1 check
+permutation w3 p3 r1 w2 r2 p1 c3 c1 p2 c2 check
+permutation w3 p3 r1 w2 r2 p2 p1 c3 c1 c2 check
+permutation w3 p3 r1 w2 r2 p2 p1 c3 c2 c1 check
+permutation w3 p3 r1 w2 r2 p2 c3 p1 c1 c2 check
+permutation w3 p3 r1 w2 r2 p2 c3 p1 c2 c1 check
+permutation w3 p3 r1 w2 r2 p2 c3 c2 p1 c1 check
+permutation w3 p3 r1 w2 r2 c3 p1 p2 c1 c2 check
+permutation w3 p3 r1 w2 r2 c3 p1 p2 c2 c1 check
+permutation w3 p3 r1 w2 r2 c3 p1 c1 p2 c2 check
+permutation w3 p3 r1 w2 r2 c3 p2 p1 c1 c2 check
+permutation w3 p3 r1 w2 r2 c3 p2 p1 c2 c1 check
+permutation w3 p3 r1 w2 r2 c3 p2 c2 p1 c1 check
+permutation w3 p3 r1 w2 p1 r2 p2 c3 c1 c2 check
+permutation w3 p3 r1 w2 p1 r2 p2 c3 c2 c1 check
+permutation w3 p3 r1 w2 p1 r2 c3 p2 c1 c2 check
+permutation w3 p3 r1 w2 p1 r2 c3 p2 c2 c1 check
+permutation w3 p3 r1 w2 p1 r2 c3 c1 p2 c2 check
+permutation w3 p3 r1 w2 p1 c3 r2 p2 c1 c2 check
+permutation w3 p3 r1 w2 p1 c3 r2 p2 c2 c1 check
+permutation w3 p3 r1 w2 p1 c3 r2 c1 p2 c2 check
+permutation w3 p3 r1 w2 p1 c3 c1 r2 p2 c2 check
+permutation w3 p3 r1 w2 c3 r2 p1 p2 c1 c2 check
+permutation w3 p3 r1 w2 c3 r2 p1 p2 c2 c1 check
+permutation w3 p3 r1 w2 c3 r2 p1 c1 p2 c2 check
+permutation w3 p3 r1 w2 c3 r2 p2 p1 c1 c2 check
+permutation w3 p3 r1 w2 c3 r2 p2 p1 c2 c1 check
+permutation w3 p3 r1 w2 c3 r2 p2 c2 p1 c1 check
+permutation w3 p3 r1 w2 c3 p1 r2 p2 c1 c2 check
+permutation w3 p3 r1 w2 c3 p1 r2 p2 c2 c1 check
+permutation w3 p3 r1 w2 c3 p1 r2 c1 p2 c2 check
+permutation w3 p3 r1 w2 c3 p1 c1 r2 p2 c2 check
+permutation w3 p3 r1 p1 r2 w2 p2 c3 c1 c2 check
+permutation w3 p3 r1 p1 r2 w2 p2 c3 c2 c1 check
+permutation w3 p3 r1 p1 r2 w2 c3 p2 c1 c2 check
+permutation w3 p3 r1 p1 r2 w2 c3 p2 c2 c1 check
+permutation w3 p3 r1 p1 r2 w2 c3 c1 p2 c2 check
+permutation w3 p3 r1 p1 r2 c3 w2 p2 c1 c2 check
+permutation w3 p3 r1 p1 r2 c3 w2 p2 c2 c1 check
+permutation w3 p3 r1 p1 r2 c3 w2 c1 p2 c2 check
+permutation w3 p3 r1 p1 r2 c3 c1 w2 p2 c2 check
+permutation w3 p3 r1 p1 w2 r2 p2 c3 c1 c2 check
+permutation w3 p3 r1 p1 w2 r2 p2 c3 c2 c1 check
+permutation w3 p3 r1 p1 w2 r2 c3 p2 c1 c2 check
+permutation w3 p3 r1 p1 w2 r2 c3 p2 c2 c1 check
+permutation w3 p3 r1 p1 w2 r2 c3 c1 p2 c2 check
+permutation w3 p3 r1 p1 w2 c3 r2 p2 c1 c2 check
+permutation w3 p3 r1 p1 w2 c3 r2 p2 c2 c1 check
+permutation w3 p3 r1 p1 w2 c3 r2 c1 p2 c2 check
+permutation w3 p3 r1 p1 w2 c3 c1 r2 p2 c2 check
+permutation w3 p3 r1 p1 c3 r2 w2 p2 c1 c2 check
+permutation w3 p3 r1 p1 c3 r2 w2 p2 c2 c1 check
+permutation w3 p3 r1 p1 c3 r2 w2 c1 p2 c2 check
+permutation w3 p3 r1 p1 c3 r2 c1 w2 p2 c2 check
+permutation w3 p3 r1 p1 c3 w2 r2 p2 c1 c2 check
+permutation w3 p3 r1 p1 c3 w2 r2 p2 c2 c1 check
+permutation w3 p3 r1 p1 c3 w2 r2 c1 p2 c2 check
+permutation w3 p3 r1 p1 c3 w2 c1 r2 p2 c2 check
+permutation w3 p3 r1 p1 c3 c1 r2 w2 p2 c2 check
+permutation w3 p3 r1 p1 c3 c1 w2 r2 p2 c2 check
+permutation w3 p3 r1 c3 r2 w2 p1 p2 c1 c2 check
+permutation w3 p3 r1 c3 r2 w2 p1 p2 c2 c1 check
+permutation w3 p3 r1 c3 r2 w2 p1 c1 p2 c2 check
+permutation w3 p3 r1 c3 r2 w2 p2 p1 c1 c2 check
+permutation w3 p3 r1 c3 r2 w2 p2 p1 c2 c1 check
+permutation w3 p3 r1 c3 r2 w2 p2 c2 p1 c1 check
+permutation w3 p3 r1 c3 r2 p1 w2 p2 c1 c2 check
+permutation w3 p3 r1 c3 r2 p1 w2 p2 c2 c1 check
+permutation w3 p3 r1 c3 r2 p1 w2 c1 p2 c2 check
+permutation w3 p3 r1 c3 r2 p1 c1 w2 p2 c2 check
+permutation w3 p3 r1 c3 w2 r2 p1 p2 c1 c2 check
+permutation w3 p3 r1 c3 w2 r2 p1 p2 c2 c1 check
+permutation w3 p3 r1 c3 w2 r2 p1 c1 p2 c2 check
+permutation w3 p3 r1 c3 w2 r2 p2 p1 c1 c2 check
+permutation w3 p3 r1 c3 w2 r2 p2 p1 c2 c1 check
+permutation w3 p3 r1 c3 w2 r2 p2 c2 p1 c1 check
+permutation w3 p3 r1 c3 w2 p1 r2 p2 c1 c2 check
+permutation w3 p3 r1 c3 w2 p1 r2 p2 c2 c1 check
+permutation w3 p3 r1 c3 w2 p1 r2 c1 p2 c2 check
+permutation w3 p3 r1 c3 w2 p1 c1 r2 p2 c2 check
+permutation w3 p3 r1 c3 p1 r2 w2 p2 c1 c2 check
+permutation w3 p3 r1 c3 p1 r2 w2 p2 c2 c1 check
+permutation w3 p3 r1 c3 p1 r2 w2 c1 p2 c2 check
+permutation w3 p3 r1 c3 p1 r2 c1 w2 p2 c2 check
+permutation w3 p3 r1 c3 p1 w2 r2 p2 c1 c2 check
+permutation w3 p3 r1 c3 p1 w2 r2 p2 c2 c1 check
+permutation w3 p3 r1 c3 p1 w2 r2 c1 p2 c2 check
+permutation w3 p3 r1 c3 p1 w2 c1 r2 p2 c2 check
+permutation w3 p3 r1 c3 p1 c1 r2 w2 p2 c2 check
+permutation w3 p3 r1 c3 p1 c1 w2 r2 p2 c2 check
+permutation w3 p3 r2 r1 w2 p1 p2 c3 c1 c2 check
+permutation w3 p3 r2 r1 w2 p1 p2 c3 c2 c1 check
+permutation w3 p3 r2 r1 w2 p1 c3 p2 c1 c2 check
+permutation w3 p3 r2 r1 w2 p1 c3 p2 c2 c1 check
+permutation w3 p3 r2 r1 w2 p1 c3 c1 p2 c2 check
+permutation w3 p3 r2 r1 w2 p2 p1 c3 c1 c2 check
+permutation w3 p3 r2 r1 w2 p2 p1 c3 c2 c1 check
+permutation w3 p3 r2 r1 w2 p2 c3 p1 c1 c2 check
+permutation w3 p3 r2 r1 w2 p2 c3 p1 c2 c1 check
+permutation w3 p3 r2 r1 w2 p2 c3 c2 p1 c1 check
+permutation w3 p3 r2 r1 w2 c3 p1 p2 c1 c2 check
+permutation w3 p3 r2 r1 w2 c3 p1 p2 c2 c1 check
+permutation w3 p3 r2 r1 w2 c3 p1 c1 p2 c2 check
+permutation w3 p3 r2 r1 w2 c3 p2 p1 c1 c2 check
+permutation w3 p3 r2 r1 w2 c3 p2 p1 c2 c1 check
+permutation w3 p3 r2 r1 w2 c3 p2 c2 p1 c1 check
+permutation w3 p3 r2 r1 p1 w2 p2 c3 c1 c2 check
+permutation w3 p3 r2 r1 p1 w2 p2 c3 c2 c1 check
+permutation w3 p3 r2 r1 p1 w2 c3 p2 c1 c2 check
+permutation w3 p3 r2 r1 p1 w2 c3 p2 c2 c1 check
+permutation w3 p3 r2 r1 p1 w2 c3 c1 p2 c2 check
+permutation w3 p3 r2 r1 p1 c3 w2 p2 c1 c2 check
+permutation w3 p3 r2 r1 p1 c3 w2 p2 c2 c1 check
+permutation w3 p3 r2 r1 p1 c3 w2 c1 p2 c2 check
+permutation w3 p3 r2 r1 p1 c3 c1 w2 p2 c2 check
+permutation w3 p3 r2 r1 c3 w2 p1 p2 c1 c2 check
+permutation w3 p3 r2 r1 c3 w2 p1 p2 c2 c1 check
+permutation w3 p3 r2 r1 c3 w2 p1 c1 p2 c2 check
+permutation w3 p3 r2 r1 c3 w2 p2 p1 c1 c2 check
+permutation w3 p3 r2 r1 c3 w2 p2 p1 c2 c1 check
+permutation w3 p3 r2 r1 c3 w2 p2 c2 p1 c1 check
+permutation w3 p3 r2 r1 c3 p1 w2 p2 c1 c2 check
+permutation w3 p3 r2 r1 c3 p1 w2 p2 c2 c1 check
+permutation w3 p3 r2 r1 c3 p1 w2 c1 p2 c2 check
+permutation w3 p3 r2 r1 c3 p1 c1 w2 p2 c2 check
+permutation w3 p3 r2 c3 r1 w2 p1 p2 c1 c2 check
+permutation w3 p3 r2 c3 r1 w2 p1 p2 c2 c1 check
+permutation w3 p3 r2 c3 r1 w2 p1 c1 p2 c2 check
+permutation w3 p3 r2 c3 r1 w2 p2 p1 c1 c2 check
+permutation w3 p3 r2 c3 r1 w2 p2 p1 c2 c1 check
+permutation w3 p3 r2 c3 r1 w2 p2 c2 p1 c1 check
+permutation w3 p3 r2 c3 r1 p1 w2 p2 c1 c2 check
+permutation w3 p3 r2 c3 r1 p1 w2 p2 c2 c1 check
+permutation w3 p3 r2 c3 r1 p1 w2 c1 p2 c2 check
+permutation w3 p3 r2 c3 r1 p1 c1 w2 p2 c2 check
+permutation w3 p3 c3 r1 r2 w2 p1 p2 c1 c2 check
+permutation w3 p3 c3 r1 r2 w2 p1 p2 c2 c1 check
+permutation w3 p3 c3 r1 r2 w2 p1 c1 p2 c2 check
+permutation w3 p3 c3 r1 r2 w2 p2 p1 c1 c2 check
+permutation w3 p3 c3 r1 r2 w2 p2 p1 c2 c1 check
+permutation w3 p3 c3 r1 r2 w2 p2 c2 p1 c1 check
+permutation w3 p3 c3 r1 r2 p1 w2 p2 c1 c2 check
+permutation w3 p3 c3 r1 r2 p1 w2 p2 c2 c1 check
+permutation w3 p3 c3 r1 r2 p1 w2 c1 p2 c2 check
+permutation w3 p3 c3 r1 r2 p1 c1 w2 p2 c2 check
+permutation w3 p3 c3 r1 w2 r2 p1 p2 c1 c2 check
+permutation w3 p3 c3 r1 w2 r2 p1 p2 c2 c1 check
+permutation w3 p3 c3 r1 w2 r2 p1 c1 p2 c2 check
+permutation w3 p3 c3 r1 w2 r2 p2 p1 c1 c2 check
+permutation w3 p3 c3 r1 w2 r2 p2 p1 c2 c1 check
+permutation w3 p3 c3 r1 w2 r2 p2 c2 p1 c1 check
+permutation w3 p3 c3 r1 w2 p1 r2 p2 c1 c2 check
+permutation w3 p3 c3 r1 w2 p1 r2 p2 c2 c1 check
+permutation w3 p3 c3 r1 w2 p1 r2 c1 p2 c2 check
+permutation w3 p3 c3 r1 w2 p1 c1 r2 p2 c2 check
+permutation w3 p3 c3 r1 p1 r2 w2 p2 c1 c2 check
+permutation w3 p3 c3 r1 p1 r2 w2 p2 c2 c1 check
+permutation w3 p3 c3 r1 p1 r2 w2 c1 p2 c2 check
+permutation w3 p3 c3 r1 p1 r2 c1 w2 p2 c2 check
+permutation w3 p3 c3 r1 p1 w2 r2 p2 c1 c2 check
+permutation w3 p3 c3 r1 p1 w2 r2 p2 c2 c1 check
+permutation w3 p3 c3 r1 p1 w2 r2 c1 p2 c2 check
+permutation w3 p3 c3 r1 p1 w2 c1 r2 p2 c2 check
+permutation w3 p3 c3 r1 p1 c1 r2 w2 p2 c2 check
+permutation w3 p3 c3 r1 p1 c1 w2 r2 p2 c2 check
+permutation w3 p3 c3 r2 r1 w2 p1 p2 c1 c2 check
+permutation w3 p3 c3 r2 r1 w2 p1 p2 c2 c1 check
+permutation w3 p3 c3 r2 r1 w2 p1 c1 p2 c2 check
+permutation w3 p3 c3 r2 r1 w2 p2 p1 c1 c2 check
+permutation w3 p3 c3 r2 r1 w2 p2 p1 c2 c1 check
+permutation w3 p3 c3 r2 r1 w2 p2 c2 p1 c1 check
+permutation w3 p3 c3 r2 r1 p1 w2 p2 c1 c2 check
+permutation w3 p3 c3 r2 r1 p1 w2 p2 c2 c1 check
+permutation w3 p3 c3 r2 r1 p1 w2 c1 p2 c2 check
+permutation w3 p3 c3 r2 r1 p1 c1 w2 p2 c2 check
diff --git a/src/test/isolation/specs/project-manager.spec b/src/test/isolation/specs/project-manager.spec
new file mode 100644
index 0000000..42e5fc5
--- /dev/null
+++ b/src/test/isolation/specs/project-manager.spec
@@ -0,0 +1,30 @@
+# Project Manager test
+#
+# Ensure that the person who is on the project as a manager
+# is flagged as a project manager in the person table.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ CREATE TABLE person (person_id int NOT NULL PRIMARY KEY, name text NOT NULL, is_project_manager bool NOT NULL);
+ INSERT INTO person VALUES (1, 'Robert Haas', true);
+ CREATE TABLE project (project_no int NOT NULL PRIMARY KEY, description text NOT NULL, project_manager int NOT NULL);
+}
+
+teardown
+{
+ DROP TABLE person, project;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rx1 { SELECT count(*) FROM person WHERE person_id = 1 AND is_project_manager; }
+step wy1 { INSERT INTO project VALUES (101, 'Build Great Wall', 1); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step ry2 { SELECT count(*) FROM project WHERE project_manager = 1; }
+step wx2 { UPDATE person SET is_project_manager = false WHERE person_id = 1; }
+step c2 { COMMIT; }
diff --git a/src/test/isolation/specs/propagate-lock-delete.spec b/src/test/isolation/specs/propagate-lock-delete.spec
new file mode 100644
index 0000000..641fb84
--- /dev/null
+++ b/src/test/isolation/specs/propagate-lock-delete.spec
@@ -0,0 +1,42 @@
+# When an update propagates a preexisting lock on the updated tuple, make sure
+# we don't ignore the lock in subsequent operations of the new version. (The
+# version with the aborted savepoint uses a slightly different code path).
+setup
+{
+ create table parent (i int, c char(3));
+ create unique index parent_idx on parent (i);
+ insert into parent values (1, 'AAA');
+ create table child (i int references parent(i));
+}
+
+teardown
+{
+ drop table child, parent;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1l { INSERT INTO child VALUES (1); }
+step s1c { COMMIT; }
+
+session s2
+step s2b { BEGIN; }
+step s2l { INSERT INTO child VALUES (1); }
+step s2c { COMMIT; }
+
+session s3
+step s3b { BEGIN; }
+step s3u { UPDATE parent SET c=lower(c); } # no key update
+step s3u2 { UPDATE parent SET i = i; } # key update
+step s3svu { SAVEPOINT f; UPDATE parent SET c = 'bbb'; ROLLBACK TO f; }
+step s3d { DELETE FROM parent; }
+step s3c { COMMIT; }
+
+permutation s1b s1l s2b s2l s3b s3u s3d s1c s2c s3c
+permutation s1b s1l s2b s2l s3b s3u s3svu s3d s1c s2c s3c
+permutation s1b s1l s2b s2l s3b s3u2 s3d s1c s2c s3c
+permutation s1b s1l s2b s2l s3b s3u2 s3svu s3d s1c s2c s3c
+permutation s1b s1l s3b s3u s3d s1c s3c
+permutation s1b s1l s3b s3u s3svu s3d s1c s3c
+permutation s1b s1l s3b s3u2 s3d s1c s3c
+permutation s1b s1l s3b s3u2 s3svu s3d s1c s3c
diff --git a/src/test/isolation/specs/read-only-anomaly-2.spec b/src/test/isolation/specs/read-only-anomaly-2.spec
new file mode 100644
index 0000000..6b579a6
--- /dev/null
+++ b/src/test/isolation/specs/read-only-anomaly-2.spec
@@ -0,0 +1,42 @@
+# The example from the paper "A read-only transaction anomaly under snapshot
+# isolation"[1].
+#
+# Here we test that serializable snapshot isolation (SERIALIZABLE) doesn't
+# suffer from the anomaly, because s2 is aborted upon detection of a cycle.
+#
+# [1] http://www.cs.umb.edu/~poneil/ROAnom.pdf
+
+setup
+{
+ CREATE TABLE bank_account (id TEXT PRIMARY KEY, balance DECIMAL NOT NULL);
+ INSERT INTO bank_account (id, balance) VALUES ('X', 0), ('Y', 0);
+}
+
+teardown
+{
+ DROP TABLE bank_account;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s1ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s1wy { UPDATE bank_account SET balance = 20 WHERE id = 'Y'; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s2rx { SELECT balance FROM bank_account WHERE id = 'X'; }
+step s2ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s2wx { UPDATE bank_account SET balance = -11 WHERE id = 'X'; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s3r { SELECT id, balance FROM bank_account WHERE id IN ('X', 'Y') ORDER BY id; }
+step s3c { COMMIT; }
+
+# without s3, s1 and s2 commit
+permutation s2rx s2ry s1ry s1wy s1c s2wx s2c s3c
+
+# once s3 observes the data committed by s1, a cycle is created and s2 aborts
+permutation s2rx s2ry s1ry s1wy s1c s3r s3c s2wx
diff --git a/src/test/isolation/specs/read-only-anomaly-3.spec b/src/test/isolation/specs/read-only-anomaly-3.spec
new file mode 100644
index 0000000..61d9c0b
--- /dev/null
+++ b/src/test/isolation/specs/read-only-anomaly-3.spec
@@ -0,0 +1,39 @@
+# The example from the paper "A read-only transaction anomaly under snapshot
+# isolation"[1].
+#
+# Here we test that serializable snapshot isolation can avoid the anomaly
+# without aborting any transactions, by instead causing s3 to be deferred
+# until a safe snapshot can be taken.
+#
+# [1] http://www.cs.umb.edu/~poneil/ROAnom.pdf
+
+setup
+{
+ CREATE TABLE bank_account (id TEXT PRIMARY KEY, balance DECIMAL NOT NULL);
+ INSERT INTO bank_account (id, balance) VALUES ('X', 0), ('Y', 0);
+}
+
+teardown
+{
+ DROP TABLE bank_account;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s1ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s1wy { UPDATE bank_account SET balance = 20 WHERE id = 'Y'; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s2rx { SELECT balance FROM bank_account WHERE id = 'X'; }
+step s2ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s2wx { UPDATE bank_account SET balance = -11 WHERE id = 'X'; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE; }
+step s3r { SELECT id, balance FROM bank_account WHERE id IN ('X', 'Y') ORDER BY id; }
+step s3c { COMMIT; }
+
+permutation s2rx s2ry s1ry s1wy s1c s3r s2wx s2c s3c
diff --git a/src/test/isolation/specs/read-only-anomaly.spec b/src/test/isolation/specs/read-only-anomaly.spec
new file mode 100644
index 0000000..8ff1af5
--- /dev/null
+++ b/src/test/isolation/specs/read-only-anomaly.spec
@@ -0,0 +1,38 @@
+# The example from the paper "A read-only transaction anomaly under snapshot
+# isolation"[1].
+#
+# Here we use snapshot isolation (REPEATABLE READ), so that s3 sees a state of
+# affairs that is not consistent with any serial ordering of s1 and s2.
+#
+# [1] http://www.cs.umb.edu/~poneil/ROAnom.pdf
+
+setup
+{
+ CREATE TABLE bank_account (id TEXT PRIMARY KEY, balance DECIMAL NOT NULL);
+ INSERT INTO bank_account (id, balance) VALUES ('X', 0), ('Y', 0);
+}
+
+teardown
+{
+ DROP TABLE bank_account;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; }
+step s1ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s1wy { UPDATE bank_account SET balance = 20 WHERE id = 'Y'; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; }
+step s2rx { SELECT balance FROM bank_account WHERE id = 'X'; }
+step s2ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s2wx { UPDATE bank_account SET balance = -11 WHERE id = 'X'; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ; }
+step s3r { SELECT id, balance FROM bank_account WHERE id IN ('X', 'Y') ORDER BY id; }
+step s3c { COMMIT; }
+
+permutation s2rx s2ry s1ry s1wy s1c s3r s2wx s2c s3c
diff --git a/src/test/isolation/specs/read-write-unique-2.spec b/src/test/isolation/specs/read-write-unique-2.spec
new file mode 100644
index 0000000..16c73e1
--- /dev/null
+++ b/src/test/isolation/specs/read-write-unique-2.spec
@@ -0,0 +1,36 @@
+# Read-write-unique test.
+
+setup
+{
+ CREATE TABLE test (i integer PRIMARY KEY);
+}
+
+teardown
+{
+ DROP TABLE test;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r1 { SELECT * FROM test WHERE i = 42; }
+step w1 { INSERT INTO test VALUES (42); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r2 { SELECT * FROM test WHERE i = 42; }
+step w2 { INSERT INTO test VALUES (42); }
+step c2 { COMMIT; }
+
+# Two SSI transactions see that there is no row with value 42
+# in the table, then try to insert that value; T1 inserts,
+# and then T2 blocks waiting for T1 to commit. Finally,
+# T2 reports a serialization failure.
+
+permutation r1 r2 w1 w2 c1 c2
+
+# If the value is already visible before T2 begins, then a
+# regular unique constraint violation should still be raised
+# by T2.
+
+permutation r1 w1 c1 r2 w2 c2
diff --git a/src/test/isolation/specs/read-write-unique-3.spec b/src/test/isolation/specs/read-write-unique-3.spec
new file mode 100644
index 0000000..cba2c4c
--- /dev/null
+++ b/src/test/isolation/specs/read-write-unique-3.spec
@@ -0,0 +1,33 @@
+# Read-write-unique test.
+# From bug report 9301.
+
+setup
+{
+ CREATE TABLE test (
+ key integer UNIQUE,
+ val text
+ );
+
+ CREATE OR REPLACE FUNCTION insert_unique(k integer, v text) RETURNS void
+ LANGUAGE SQL AS $$
+ INSERT INTO test (key, val) SELECT k, v WHERE NOT EXISTS (SELECT key FROM test WHERE key = k);
+ $$;
+}
+
+teardown
+{
+ DROP FUNCTION insert_unique(integer, text);
+ DROP TABLE test;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rw1 { SELECT insert_unique(1, '1'); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rw2 { SELECT insert_unique(1, '2'); }
+step c2 { COMMIT; }
+
+permutation rw1 rw2 c1 c2
diff --git a/src/test/isolation/specs/read-write-unique-4.spec b/src/test/isolation/specs/read-write-unique-4.spec
new file mode 100644
index 0000000..9002248
--- /dev/null
+++ b/src/test/isolation/specs/read-write-unique-4.spec
@@ -0,0 +1,48 @@
+# Read-write-unique test.
+# Implementing a gapless sequence of ID numbers for each year.
+
+setup
+{
+ CREATE TABLE invoice (
+ year int,
+ invoice_number int,
+ PRIMARY KEY (year, invoice_number)
+ );
+
+ INSERT INTO invoice VALUES (2016, 1), (2016, 2);
+}
+
+teardown
+{
+ DROP TABLE invoice;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r1 { SELECT COALESCE(MAX(invoice_number) + 1, 1) FROM invoice WHERE year = 2016; }
+step w1 { INSERT INTO invoice VALUES (2016, 3); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r2 { SELECT COALESCE(MAX(invoice_number) + 1, 1) FROM invoice WHERE year = 2016; }
+step w2 { INSERT INTO invoice VALUES (2016, 3); }
+step c2 { COMMIT; }
+
+# if they both read first then there should be an SSI conflict
+permutation r1 r2 w1 w2 c1 c2
+
+# cases where one session doesn't explicitly read before writing:
+
+# if s2 doesn't explicitly read, then trying to insert the value
+# generates a unique constraint violation after s1 commits, as if s2
+# ran after s1
+permutation r1 w1 w2 c1 c2
+
+# if s1 doesn't explicitly read, but s2 does, then s1 inserts and
+# commits first, should s2 experience an SSI failure instead of a
+# unique constraint violation? there is no serial order of operations
+# (s1, s2) or (s2, s1) where s1 succeeds, and s2 doesn't see the row
+# in an explicit select but then fails to insert due to unique
+# constraint violation
+permutation r2 w1 w2 c1 c2
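The gapless-numbering idiom exercised above can be sketched in plain SQL; this is only an illustration, reusing the invoice table from the setup block, with any retry loop assumed to live in the client:

    -- run concurrently in two sessions
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    SELECT COALESCE(MAX(invoice_number) + 1, 1)
      FROM invoice
     WHERE year = 2016;                   -- both sessions may compute 3
    INSERT INTO invoice VALUES (2016, 3);
    COMMIT;
    -- one session succeeds; the other fails with SQLSTATE 40001 (or a
    -- unique violation, per the permutations above) and must retry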
diff --git a/src/test/isolation/specs/read-write-unique.spec b/src/test/isolation/specs/read-write-unique.spec
new file mode 100644
index 0000000..3ce059f
--- /dev/null
+++ b/src/test/isolation/specs/read-write-unique.spec
@@ -0,0 +1,39 @@
+# Read-write-unique test.
+
+setup
+{
+ CREATE TABLE test (i integer PRIMARY KEY);
+}
+
+teardown
+{
+ DROP TABLE test;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r1 { SELECT * FROM test; }
+step w1 { INSERT INTO test VALUES (42); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r2 { SELECT * FROM test; }
+step w2 { INSERT INTO test VALUES (42); }
+step c2 { COMMIT; }
+
+# Two SSI transactions see that there is no row with value 42
+# in the table, then try to insert that value; T1 inserts,
+# and then T2 blocks waiting for T1 to commit. Finally,
+# T2 reports a serialization failure.
+#
+# (In an earlier version of Postgres, T2 would report a unique
+# constraint violation).
+
+permutation r1 r2 w1 w2 c1 c2
+
+# If the value is already visible before T2 begins, then a
+# regular unique constraint violation should still be raised
+# by T2.
+
+permutation r1 w1 c1 r2 w2 c2
diff --git a/src/test/isolation/specs/receipt-report.spec b/src/test/isolation/specs/receipt-report.spec
new file mode 100644
index 0000000..85ac60f
--- /dev/null
+++ b/src/test/isolation/specs/receipt-report.spec
@@ -0,0 +1,47 @@
+# Daily Report of Receipts test.
+#
+# This test doesn't persist a bad state in the database; rather, it
+# provides a view of the data which is not consistent with any
+# order of execution of the serializable transactions. It
+# demonstrates a situation where the deposit date for receipts could
+# be changed and a report of the closed day's receipts subsequently
+# run which will miss a receipt from the date which has been closed.
+#
+# There are only six permutations which must cause a serialization failure.
+# Failure cases are where s1 overlaps both s2 and s3, but s2 commits before
+# s3 executes its first SELECT.
+#
+# As long as s3 is declared READ ONLY there should be no false positives.
+# If s3 were changed to READ WRITE, we would currently expect 42 false
+# positives. Further work dealing with de facto READ ONLY transactions
+# may be able to reduce or eliminate those false positives.
+
+setup
+{
+ CREATE TABLE ctl (k text NOT NULL PRIMARY KEY, deposit_date date NOT NULL);
+ INSERT INTO ctl VALUES ('receipt', DATE '2008-12-22');
+ CREATE TABLE receipt (receipt_no int NOT NULL PRIMARY KEY, deposit_date date NOT NULL, amount numeric(13,2));
+ INSERT INTO receipt VALUES (1, (SELECT deposit_date FROM ctl WHERE k = 'receipt'), 1.00);
+ INSERT INTO receipt VALUES (2, (SELECT deposit_date FROM ctl WHERE k = 'receipt'), 2.00);
+}
+
+teardown
+{
+ DROP TABLE ctl, receipt;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rxwy1 { INSERT INTO receipt VALUES (3, (SELECT deposit_date FROM ctl WHERE k = 'receipt'), 4.00); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wx2 { UPDATE ctl SET deposit_date = DATE '2008-12-23' WHERE k = 'receipt'; }
+step c2 { COMMIT; }
+
+session s3
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE, READ ONLY; }
+step rx3 { SELECT * FROM ctl WHERE k = 'receipt'; }
+step ry3 { SELECT * FROM receipt WHERE deposit_date = DATE '2008-12-22'; }
+step c3 { COMMIT; }
diff --git a/src/test/isolation/specs/referential-integrity.spec b/src/test/isolation/specs/referential-integrity.spec
new file mode 100644
index 0000000..ecaa9bb
--- /dev/null
+++ b/src/test/isolation/specs/referential-integrity.spec
@@ -0,0 +1,32 @@
+# Referential Integrity test
+#
+# The assumption here is that the application code issuing the SELECT
+# to test for the presence or absence of a related record would do the
+# right thing -- this script doesn't include that logic.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ CREATE TABLE a (i int PRIMARY KEY);
+ CREATE TABLE b (a_id int);
+ INSERT INTO a VALUES (1);
+}
+
+teardown
+{
+ DROP TABLE a, b;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rx1 { SELECT i FROM a WHERE i = 1; }
+step wy1 { INSERT INTO b VALUES (1); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rx2 { SELECT i FROM a WHERE i = 1; }
+step ry2 { SELECT a_id FROM b WHERE a_id = 1; }
+step wx2 { DELETE FROM a WHERE i = 1; }
+step c2 { COMMIT; }
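For reference, the application-side check that the comment above assumes (and that this script deliberately omits) would look roughly like this, using the a/b tables from the setup block:

    BEGIN ISOLATION LEVEL SERIALIZABLE;
    SELECT i FROM a WHERE i = 1;   -- verify the referenced parent row exists
    INSERT INTO b VALUES (1);      -- only then insert the referencing row
    COMMIT;
    -- an overlapping SERIALIZABLE transaction that deletes the parent
    -- (rx2/ry2/wx2 above) then causes a serialization failure in one of
    -- the two transactions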
diff --git a/src/test/isolation/specs/reindex-concurrently-toast.spec b/src/test/isolation/specs/reindex-concurrently-toast.spec
new file mode 100644
index 0000000..8188792
--- /dev/null
+++ b/src/test/isolation/specs/reindex-concurrently-toast.spec
@@ -0,0 +1,119 @@
+# REINDEX CONCURRENTLY with toast relations
+#
+# Ensure that concurrent operations work correctly when a REINDEX is performed
+# concurrently on toast relations. Toast relation names are not deterministic,
+# so this test abuses allow_system_table_mods to rename the toast table and
+# its index to deterministic names that can then be passed to REINDEX
+# CONCURRENTLY, which cannot be run inside a transaction block.
+
+# Create a table, with deterministic names for its toast relation and indexes.
+# Fortunately ALTER TABLE is transactional, making the renaming of toast
+# relations possible with allow_system_table_mods.
+setup
+{
+ CREATE TABLE reind_con_wide(id int primary key, data text);
+ INSERT INTO reind_con_wide
+ SELECT 1, repeat('1', 11) || string_agg(g.i::text || random()::text, '') FROM generate_series(1, 500) g(i);
+ INSERT INTO reind_con_wide
+ SELECT 2, repeat('2', 11) || string_agg(g.i::text || random()::text, '') FROM generate_series(1, 500) g(i);
+ SET allow_system_table_mods TO true;
+ DO $$DECLARE r record;
+ BEGIN
+ SELECT INTO r reltoastrelid::regclass::text AS table_name FROM pg_class
+ WHERE oid = 'reind_con_wide'::regclass;
+ EXECUTE 'ALTER TABLE ' || r.table_name || ' RENAME TO reind_con_toast;';
+ SELECT INTO r indexrelid::regclass::text AS index_name FROM pg_index
+ WHERE indrelid = (SELECT oid FROM pg_class where relname = 'reind_con_toast');
+ EXECUTE 'ALTER INDEX ' || r.index_name || ' RENAME TO reind_con_toast_idx;';
+ END$$;
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS reind_con_wide;
+}
+
+session s1
+setup { BEGIN; }
+step lrex1 { lock TABLE reind_con_wide in ROW EXCLUSIVE MODE; }
+step lsha1 { lock TABLE reind_con_wide in SHARE MODE; }
+step lexc1 { lock TABLE reind_con_wide in EXCLUSIVE MODE; }
+step ins1 { INSERT INTO reind_con_wide SELECT 3, repeat('3', 11) || string_agg(g.i::text || random()::text, '') FROM generate_series(1, 500) g(i); }
+step upd1 { UPDATE reind_con_wide SET data = (SELECT repeat('4', 11) || string_agg(g.i::text || random()::text, '') FROM generate_series(1, 500) g(i)) WHERE id = 1; }
+step del1 { DELETE FROM reind_con_wide WHERE id = 2; }
+step dro1 { DROP TABLE reind_con_wide; }
+step end1 { COMMIT; }
+step rol1 { ROLLBACK; }
+
+session s2
+step retab2 { REINDEX TABLE CONCURRENTLY pg_toast.reind_con_toast; }
+step reind2 { REINDEX INDEX CONCURRENTLY pg_toast.reind_con_toast_idx; }
+step sel2 { SELECT id, substr(data, 1, 10) FROM reind_con_wide ORDER BY id; }
+
+# Transaction commit with ROW EXCLUSIVE MODE
+permutation lrex1 ins1 retab2 end1 sel2
+permutation lrex1 ins1 reind2 end1 sel2
+permutation lrex1 upd1 retab2 end1 sel2
+permutation lrex1 upd1 reind2 end1 sel2
+permutation lrex1 del1 retab2 end1 sel2
+permutation lrex1 del1 reind2 end1 sel2
+permutation lrex1 dro1 retab2 end1 sel2
+permutation lrex1 dro1 reind2 end1 sel2
+permutation lrex1 retab2 dro1 end1 sel2
+permutation lrex1 reind2 dro1 end1 sel2
+# Transaction commit with SHARE MODE
+permutation lsha1 ins1 retab2 end1 sel2
+permutation lsha1 ins1 reind2 end1 sel2
+permutation lsha1 upd1 retab2 end1 sel2
+permutation lsha1 upd1 reind2 end1 sel2
+permutation lsha1 del1 retab2 end1 sel2
+permutation lsha1 del1 reind2 end1 sel2
+permutation lsha1 dro1 retab2 end1 sel2
+permutation lsha1 dro1 reind2 end1 sel2
+permutation lsha1 retab2 dro1 end1 sel2
+permutation lsha1 reind2 dro1 end1 sel2
+# Transaction commit with EXCLUSIVE MODE
+permutation lexc1 ins1 retab2 end1 sel2
+permutation lexc1 ins1 reind2 end1 sel2
+permutation lexc1 upd1 retab2 end1 sel2
+permutation lexc1 upd1 reind2 end1 sel2
+permutation lexc1 del1 retab2 end1 sel2
+permutation lexc1 del1 reind2 end1 sel2
+permutation lexc1 dro1 retab2 end1 sel2
+permutation lexc1 dro1 reind2 end1 sel2
+permutation lexc1 retab2 dro1 end1 sel2
+permutation lexc1 reind2 dro1 end1 sel2
+
+# Transaction rollback with ROW EXCLUSIVE MODE
+permutation lrex1 ins1 retab2 rol1 sel2
+permutation lrex1 ins1 reind2 rol1 sel2
+permutation lrex1 upd1 retab2 rol1 sel2
+permutation lrex1 upd1 reind2 rol1 sel2
+permutation lrex1 del1 retab2 rol1 sel2
+permutation lrex1 del1 reind2 rol1 sel2
+permutation lrex1 dro1 retab2 rol1 sel2
+permutation lrex1 dro1 reind2 rol1 sel2
+permutation lrex1 retab2 dro1 rol1 sel2
+permutation lrex1 reind2 dro1 rol1 sel2
+# Transaction rollback with SHARE MODE
+permutation lsha1 ins1 retab2 rol1 sel2
+permutation lsha1 ins1 reind2 rol1 sel2
+permutation lsha1 upd1 retab2 rol1 sel2
+permutation lsha1 upd1 reind2 rol1 sel2
+permutation lsha1 del1 retab2 rol1 sel2
+permutation lsha1 del1 reind2 rol1 sel2
+permutation lsha1 dro1 retab2 rol1 sel2
+permutation lsha1 dro1 reind2 rol1 sel2
+permutation lsha1 retab2 dro1 rol1 sel2
+permutation lsha1 reind2 dro1 rol1 sel2
+# Transaction rollback with EXCLUSIVE MODE
+permutation lexc1 ins1 retab2 rol1 sel2
+permutation lexc1 ins1 reind2 rol1 sel2
+permutation lexc1 upd1 retab2 rol1 sel2
+permutation lexc1 upd1 reind2 rol1 sel2
+permutation lexc1 del1 retab2 rol1 sel2
+permutation lexc1 del1 reind2 rol1 sel2
+permutation lexc1 dro1 retab2 rol1 sel2
+permutation lexc1 dro1 reind2 rol1 sel2
+permutation lexc1 retab2 dro1 rol1 sel2
+permutation lexc1 reind2 dro1 rol1 sel2
diff --git a/src/test/isolation/specs/reindex-concurrently.spec b/src/test/isolation/specs/reindex-concurrently.spec
new file mode 100644
index 0000000..31844bd
--- /dev/null
+++ b/src/test/isolation/specs/reindex-concurrently.spec
@@ -0,0 +1,40 @@
+# REINDEX CONCURRENTLY
+#
+# Ensure that concurrent operations work correctly when a REINDEX is performed
+# concurrently.
+
+setup
+{
+ CREATE TABLE reind_con_tab(id serial primary key, data text);
+ INSERT INTO reind_con_tab(data) VALUES ('aa');
+ INSERT INTO reind_con_tab(data) VALUES ('aaa');
+ INSERT INTO reind_con_tab(data) VALUES ('aaaa');
+ INSERT INTO reind_con_tab(data) VALUES ('aaaaa');
+}
+
+teardown
+{
+ DROP TABLE reind_con_tab;
+}
+
+session s1
+setup { BEGIN; }
+step sel1 { SELECT data FROM reind_con_tab WHERE id = 3; }
+step end1 { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step upd2 { UPDATE reind_con_tab SET data = 'bbbb' WHERE id = 3; }
+step ins2 { INSERT INTO reind_con_tab(data) VALUES ('cccc'); }
+step del2 { DELETE FROM reind_con_tab WHERE data = 'cccc'; }
+step end2 { COMMIT; }
+
+session s3
+step reindex { REINDEX TABLE CONCURRENTLY reind_con_tab; }
+
+permutation reindex sel1 upd2 ins2 del2 end1 end2
+permutation sel1 reindex upd2 ins2 del2 end1 end2
+permutation sel1 upd2 reindex ins2 del2 end1 end2
+permutation sel1 upd2 ins2 reindex del2 end1 end2
+permutation sel1 upd2 ins2 del2 reindex end1 end2
+permutation sel1 upd2 ins2 del2 end1 reindex end2
diff --git a/src/test/isolation/specs/reindex-schema.spec b/src/test/isolation/specs/reindex-schema.spec
new file mode 100644
index 0000000..dee4ad7
--- /dev/null
+++ b/src/test/isolation/specs/reindex-schema.spec
@@ -0,0 +1,32 @@
+# REINDEX with schemas
+#
+# Check that a relation can be dropped concurrently while REINDEX SCHEMA
+# is running, and that the command still completes correctly.
+
+setup
+{
+ CREATE SCHEMA reindex_schema;
+ CREATE TABLE reindex_schema.tab_locked (a int PRIMARY KEY);
+ CREATE TABLE reindex_schema.tab_dropped (a int PRIMARY KEY);
+}
+
+teardown
+{
+ DROP SCHEMA reindex_schema CASCADE;
+}
+
+session s1
+step begin1 { BEGIN; }
+step lock1 { LOCK reindex_schema.tab_locked IN SHARE UPDATE EXCLUSIVE MODE; }
+step end1 { COMMIT; }
+
+session s2
+step reindex2 { REINDEX SCHEMA reindex_schema; }
+step reindex_conc2 { REINDEX SCHEMA CONCURRENTLY reindex_schema; }
+
+session s3
+step drop3 { DROP TABLE reindex_schema.tab_dropped; }
+
+# The table can be dropped while reindex is waiting.
+permutation begin1 lock1 reindex2 drop3 end1
+permutation begin1 lock1 reindex_conc2 drop3 end1
diff --git a/src/test/isolation/specs/ri-trigger.spec b/src/test/isolation/specs/ri-trigger.spec
new file mode 100644
index 0000000..00fcdff
--- /dev/null
+++ b/src/test/isolation/specs/ri-trigger.spec
@@ -0,0 +1,53 @@
+# RI Trigger test
+#
+# Test trigger-based referential integrity enforcement.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ CREATE TABLE parent (parent_id SERIAL NOT NULL PRIMARY KEY);
+ CREATE TABLE child (child_id SERIAL NOT NULL PRIMARY KEY, parent_id INTEGER NOT NULL);
+ CREATE FUNCTION ri_parent() RETURNS TRIGGER LANGUAGE PLPGSQL AS $body$
+ BEGIN
+ PERFORM TRUE FROM child WHERE parent_id = OLD.parent_id;
+ IF FOUND THEN
+ RAISE SQLSTATE '23503' USING MESSAGE = 'child row exists';
+ END IF;
+ IF TG_OP = 'DELETE' THEN
+ RETURN OLD;
+ END IF;
+ RETURN NEW;
+ END;
+ $body$;
+ CREATE TRIGGER ri_parent BEFORE UPDATE OR DELETE ON parent FOR EACH ROW EXECUTE PROCEDURE ri_parent();
+ CREATE FUNCTION ri_child() RETURNS TRIGGER LANGUAGE PLPGSQL AS $body$
+ BEGIN
+ PERFORM TRUE FROM parent WHERE parent_id = NEW.parent_id;
+ IF NOT FOUND THEN
+ RAISE SQLSTATE '23503' USING MESSAGE = 'parent row missing';
+ END IF;
+ RETURN NEW;
+ END;
+ $body$;
+ CREATE TRIGGER ri_child BEFORE INSERT OR UPDATE ON child FOR EACH ROW EXECUTE PROCEDURE ri_child();
+ INSERT INTO parent VALUES(0);
+}
+
+teardown
+{
+ DROP TABLE parent, child;
+ DROP FUNCTION ri_parent();
+ DROP FUNCTION ri_child();
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wxry1 { INSERT INTO child (parent_id) VALUES (0); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step r2 { SELECT TRUE; }
+step wyrx2 { DELETE FROM parent WHERE parent_id = 0; }
+step c2 { COMMIT; }
diff --git a/src/test/isolation/specs/sequence-ddl.spec b/src/test/isolation/specs/sequence-ddl.spec
new file mode 100644
index 0000000..7ead8af
--- /dev/null
+++ b/src/test/isolation/specs/sequence-ddl.spec
@@ -0,0 +1,42 @@
+# Test sequence usage and concurrent sequence DDL
+
+setup
+{
+ CREATE SEQUENCE seq1;
+}
+
+teardown
+{
+ DROP SEQUENCE seq1;
+}
+
+session s1
+setup { BEGIN; }
+step s1alter { ALTER SEQUENCE seq1 MAXVALUE 10; }
+step s1alter2 { ALTER SEQUENCE seq1 MAXVALUE 20; }
+step s1restart { ALTER SEQUENCE seq1 RESTART WITH 5; }
+step s1setval { SELECT setval('seq1', 5); }
+step s1commit { COMMIT; }
+
+session s2
+step s2begin { BEGIN; }
+step s2nv { SELECT nextval('seq1') FROM generate_series(1, 15); }
+step s2commit { COMMIT; }
+
+permutation s1alter s1commit s2nv
+
+# Prior to PG10, the s2nv step would see the uncommitted s1alter
+# change, but now it waits.
+permutation s1alter s2nv s1commit
+
+# Prior to PG10, the s2nv step would see the uncommitted s1restart
+# change, but now it waits.
+permutation s1restart s2nv s1commit
+
+# In contrast to ALTER SEQUENCE, setval() is non-transactional, so it
+# doesn't have to wait.
+permutation s1setval s2nv s1commit
+
+# nextval doesn't release lock until transaction end, so s1alter2 has
+# to wait for s2commit.
+permutation s2begin s2nv s1alter2 s2commit s1commit
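To make the transactional-versus-non-transactional distinction these comments rely on concrete, a rough interactive sketch (using the seq1 sequence from the setup block) is:

    BEGIN;
    ALTER SEQUENCE seq1 RESTART WITH 5;  -- transactional: a concurrent
                                         -- nextval('seq1') now blocks
    ROLLBACK;                            -- ... and the restart is undone

    SELECT setval('seq1', 5);            -- non-transactional: effective
                                         -- immediately, not undone by a later
                                         -- rollback, and concurrent nextval()
                                         -- calls do not have to wait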
diff --git a/src/test/isolation/specs/serializable-parallel-2.spec b/src/test/isolation/specs/serializable-parallel-2.spec
new file mode 100644
index 0000000..c975d96
--- /dev/null
+++ b/src/test/isolation/specs/serializable-parallel-2.spec
@@ -0,0 +1,34 @@
+# Exercise the case where a read-only serializable transaction has
+# SXACT_FLAG_RO_SAFE set in a parallel query.
+
+setup
+{
+ CREATE TABLE foo AS SELECT generate_series(1, 100)::int a;
+ CREATE INDEX ON foo(a);
+ ALTER TABLE foo SET (parallel_workers = 2);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s1r { SELECT COUNT(*) FROM foo; }
+step s1c { COMMIT; }
+
+session s2
+setup {
+ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ SET min_parallel_index_scan_size = 0;
+ SET parallel_leader_participation = off;
+ SET enable_seqscan = off;
+ }
+step s2r1 { SELECT COUNT(*) FROM foo; }
+step s2r2 { SELECT COUNT(*) FROM foo; }
+step s2c { COMMIT; }
+
+permutation s1r s2r1 s1c s2r2 s2c
diff --git a/src/test/isolation/specs/serializable-parallel-3.spec b/src/test/isolation/specs/serializable-parallel-3.spec
new file mode 100644
index 0000000..858156c
--- /dev/null
+++ b/src/test/isolation/specs/serializable-parallel-3.spec
@@ -0,0 +1,47 @@
+# Exercise the case where a read-only serializable transaction has
+# SXACT_FLAG_RO_SAFE set in a parallel query. This variant is like
+# two copies of #2 running at the same time, and exercises the case
+# where another transaction has the same xmin, and it is the oldest.
+
+setup
+{
+ CREATE TABLE foo AS SELECT generate_series(1, 10)::int a;
+ ALTER TABLE foo SET (parallel_workers = 2);
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s1r { SELECT * FROM foo; }
+step s1c { COMMIT; }
+
+session s2
+setup {
+ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ }
+step s2r1 { SELECT * FROM foo; }
+step s2r2 { SELECT * FROM foo; }
+step s2c { COMMIT; }
+
+session s3
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s3r { SELECT * FROM foo; }
+step s3c { COMMIT; }
+
+session s4
+setup {
+ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY;
+ SET parallel_setup_cost = 0;
+ SET parallel_tuple_cost = 0;
+ }
+step s4r1 { SELECT * FROM foo; }
+step s4r2 { SELECT * FROM foo; }
+step s4c { COMMIT; }
+
+permutation s1r s3r s2r1 s4r1 s1c s2r2 s3c s4r2 s4c s2c
diff --git a/src/test/isolation/specs/serializable-parallel.spec b/src/test/isolation/specs/serializable-parallel.spec
new file mode 100644
index 0000000..f786b1d
--- /dev/null
+++ b/src/test/isolation/specs/serializable-parallel.spec
@@ -0,0 +1,47 @@
+# The example from the paper "A read-only transaction anomaly under snapshot
+# isolation"[1].
+#
+# Here we test that serializable snapshot isolation (SERIALIZABLE) doesn't
+# suffer from the anomaly, because s2 is aborted upon detection of a cycle.
+# In this case the read only query s3 happens to be running in a parallel
+# worker.
+#
+# [1] http://www.cs.umb.edu/~poneil/ROAnom.pdf
+
+setup
+{
+ CREATE TABLE bank_account (id TEXT PRIMARY KEY, balance DECIMAL NOT NULL);
+ INSERT INTO bank_account (id, balance) VALUES ('X', 0), ('Y', 0);
+}
+
+teardown
+{
+ DROP TABLE bank_account;
+}
+
+session s1
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s1ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s1wy { UPDATE bank_account SET balance = 20 WHERE id = 'Y'; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step s2rx { SELECT balance FROM bank_account WHERE id = 'X'; }
+step s2ry { SELECT balance FROM bank_account WHERE id = 'Y'; }
+step s2wx { UPDATE bank_account SET balance = -11 WHERE id = 'X'; }
+step s2c { COMMIT; }
+
+session s3
+setup {
+ BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE;
+ SET debug_parallel_query = on;
+ }
+step s3r { SELECT id, balance FROM bank_account WHERE id IN ('X', 'Y') ORDER BY id; }
+step s3c { COMMIT; }
+
+# without s3, s1 and s2 commit
+permutation s2rx s2ry s1ry s1wy s1c s2wx s2c s3c
+
+# once s3 observes the data committed by s1, a cycle is created and s2 aborts
+permutation s2rx s2ry s1ry s1wy s1c s3r s3c s2wx
diff --git a/src/test/isolation/specs/simple-write-skew.spec b/src/test/isolation/specs/simple-write-skew.spec
new file mode 100644
index 0000000..ecabbf1
--- /dev/null
+++ b/src/test/isolation/specs/simple-write-skew.spec
@@ -0,0 +1,30 @@
+# Write skew test.
+#
+# This test has two serializable transactions: one which updates all
+# 'apple' rows to 'pear' and one which updates all 'pear' rows to
+# 'apple'. If these were serialized (run one at a time) either
+# value could be present, but not both. One must be rolled back to
+# prevent the write skew anomaly.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ CREATE TABLE test (i int PRIMARY KEY, t text);
+ INSERT INTO test VALUES (5, 'apple'), (7, 'pear'), (11, 'banana');
+}
+
+teardown
+{
+ DROP TABLE test;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rwx1 { UPDATE test SET t = 'apple' WHERE t = 'pear'; }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rwx2 { UPDATE test SET t = 'pear' WHERE t = 'apple'; }
+step c2 { COMMIT; }
diff --git a/src/test/isolation/specs/skip-locked-2.spec b/src/test/isolation/specs/skip-locked-2.spec
new file mode 100644
index 0000000..cfdaa93
--- /dev/null
+++ b/src/test/isolation/specs/skip-locked-2.spec
@@ -0,0 +1,41 @@
+# Test SKIP LOCKED with multixact locks.
+
+setup
+{
+ CREATE TABLE queue (
+ id int PRIMARY KEY,
+ data text NOT NULL,
+ status text NOT NULL
+ );
+ INSERT INTO queue VALUES (1, 'foo', 'NEW'), (2, 'bar', 'NEW');
+}
+
+teardown
+{
+ DROP TABLE queue;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM queue ORDER BY id FOR SHARE SKIP LOCKED LIMIT 1; }
+step s1b { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2a { SELECT * FROM queue ORDER BY id FOR SHARE SKIP LOCKED LIMIT 1; }
+step s2b { SELECT * FROM queue ORDER BY id FOR UPDATE SKIP LOCKED LIMIT 1; }
+step s2c { COMMIT; }
+
+# s1 and s2 both get SHARE lock, creating a multixact lock, then s2
+# tries to upgrade to UPDATE but skips the record because it can't
+# acquire a multixact lock
+permutation s1a s2a s2b s1b s2c
+
+# the same but with the SHARE locks acquired in a different order, so
+# s2 again skips because it can't acquire a multixact lock
+permutation s2a s1a s2b s1b s2c
+
+# s2 acquires SHARE then UPDATE, then s1 tries to acquire SHARE but
+# can't so skips the first record because it can't acquire a regular
+# lock
+permutation s2a s2b s1a s1b s2c
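These permutations underpin the usual job-queue pattern, sketched below against the queue table from the setup block (the status values are simply the ones used there):

    BEGIN;
    SELECT id, data
      FROM queue
     WHERE status = 'NEW'
     ORDER BY id
       FOR UPDATE SKIP LOCKED
     LIMIT 1;                 -- claims one row without waiting on rows that
                              -- other workers have already locked
    -- ... process the claimed row, then e.g.:
    -- UPDATE queue SET status = 'DONE' WHERE id = <claimed id>;
    COMMIT;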
diff --git a/src/test/isolation/specs/skip-locked-3.spec b/src/test/isolation/specs/skip-locked-3.spec
new file mode 100644
index 0000000..7921425
--- /dev/null
+++ b/src/test/isolation/specs/skip-locked-3.spec
@@ -0,0 +1,36 @@
+# Test SKIP LOCKED with tuple locks.
+
+setup
+{
+ CREATE TABLE queue (
+ id int PRIMARY KEY,
+ data text NOT NULL,
+ status text NOT NULL
+ );
+ INSERT INTO queue VALUES (1, 'foo', 'NEW'), (2, 'bar', 'NEW');
+}
+
+teardown
+{
+ DROP TABLE queue;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM queue ORDER BY id FOR UPDATE LIMIT 1; }
+step s1b { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2a { SELECT * FROM queue ORDER BY id FOR UPDATE LIMIT 1; }
+step s2b { COMMIT; }
+
+session s3
+setup { BEGIN; }
+step s3a { SELECT * FROM queue ORDER BY id FOR UPDATE SKIP LOCKED LIMIT 1; }
+step s3b { COMMIT; }
+
+# s3 skips to the second record because it can't obtain the tuple lock
+# (s2 holds the tuple lock because it is next in line to obtain the
+# row lock, and s1 holds the row lock)
+permutation s1a s2a s3a s1b s2b s3b
diff --git a/src/test/isolation/specs/skip-locked-4.spec b/src/test/isolation/specs/skip-locked-4.spec
new file mode 100644
index 0000000..02994a3
--- /dev/null
+++ b/src/test/isolation/specs/skip-locked-4.spec
@@ -0,0 +1,36 @@
+# Test SKIP LOCKED with an updated tuple chain.
+
+setup
+{
+ CREATE TABLE foo (
+ id int PRIMARY KEY,
+ data text NOT NULL
+ );
+ INSERT INTO foo VALUES (1, 'x'), (2, 'x');
+}
+
+teardown
+{
+ DROP TABLE foo;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM foo WHERE pg_advisory_lock(0) IS NOT NULL ORDER BY id LIMIT 1 FOR UPDATE SKIP LOCKED; }
+step s1b { COMMIT; }
+
+session s2
+step s2a { SELECT pg_advisory_lock(0); }
+step s2b { UPDATE foo SET data = data WHERE id = 1; }
+step s2c { BEGIN; }
+step s2d { UPDATE foo SET data = data WHERE id = 1; }
+step s2e { SELECT pg_advisory_unlock(0); }
+step s2f { COMMIT; }
+
+# s1 takes a snapshot but then waits on an advisory lock, then s2
+# updates the row in one transaction, then again in another without
+# committing, before allowing s1 to proceed to try to lock a row;
+# because it has a snapshot that sees the older version, we reach the
+# waiting code in EvalPlanQualFetch which skips rows when in SKIP
+# LOCKED mode, so s1 sees the second row
+permutation s2a s1a s2b s2c s2d s2e s1b s2f
diff --git a/src/test/isolation/specs/skip-locked.spec b/src/test/isolation/specs/skip-locked.spec
new file mode 100644
index 0000000..12168f8
--- /dev/null
+++ b/src/test/isolation/specs/skip-locked.spec
@@ -0,0 +1,28 @@
+# Test SKIP LOCKED when regular row locks can't be acquired.
+
+setup
+{
+ CREATE TABLE queue (
+ id int PRIMARY KEY,
+ data text NOT NULL,
+ status text NOT NULL
+ );
+ INSERT INTO queue VALUES (1, 'foo', 'NEW'), (2, 'bar', 'NEW');
+}
+
+teardown
+{
+ DROP TABLE queue;
+}
+
+session s1
+setup { BEGIN; }
+step s1a { SELECT * FROM queue ORDER BY id FOR UPDATE SKIP LOCKED LIMIT 1; }
+step s1b { SELECT * FROM queue ORDER BY id FOR UPDATE SKIP LOCKED LIMIT 1; }
+step s1c { COMMIT; }
+
+session s2
+setup { BEGIN; }
+step s2a { SELECT * FROM queue ORDER BY id FOR UPDATE SKIP LOCKED LIMIT 1; }
+step s2b { SELECT * FROM queue ORDER BY id FOR UPDATE SKIP LOCKED LIMIT 1; }
+step s2c { COMMIT; }
diff --git a/src/test/isolation/specs/stats.spec b/src/test/isolation/specs/stats.spec
new file mode 100644
index 0000000..5b922d7
--- /dev/null
+++ b/src/test/isolation/specs/stats.spec
@@ -0,0 +1,760 @@
+setup
+{
+ CREATE TABLE test_stat_oid(name text NOT NULL, oid oid);
+
+ CREATE TABLE test_stat_tab(key text not null, value int);
+ INSERT INTO test_stat_tab(key, value) VALUES('k0', 1);
+ INSERT INTO test_stat_oid(name, oid) VALUES('test_stat_tab', 'test_stat_tab'::regclass);
+
+ CREATE FUNCTION test_stat_func() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$;
+ INSERT INTO test_stat_oid(name, oid) VALUES('test_stat_func', 'test_stat_func'::regproc);
+
+ CREATE FUNCTION test_stat_func2() RETURNS VOID LANGUAGE plpgsql AS $$BEGIN END;$$;
+ INSERT INTO test_stat_oid(name, oid) VALUES('test_stat_func2', 'test_stat_func2'::regproc);
+
+ CREATE TABLE test_slru_stats(slru TEXT, stat TEXT, value INT);
+
+ -- calls test_stat_func, but hides error if it doesn't exist
+ CREATE FUNCTION test_stat_func_ifexists() RETURNS VOID LANGUAGE plpgsql AS $$
+ BEGIN
+ PERFORM test_stat_func();
+ EXCEPTION WHEN undefined_function THEN
+ END;$$;
+
+ SELECT pg_stat_force_next_flush();
+}
+
+teardown
+{
+ DROP TABLE test_stat_oid;
+ DROP TABLE test_slru_stats;
+
+ DROP TABLE IF EXISTS test_stat_tab;
+ DROP FUNCTION IF EXISTS test_stat_func();
+ DROP FUNCTION IF EXISTS test_stat_func2();
+ DROP FUNCTION test_stat_func_ifexists();
+}
+
+session s1
+setup { SET stats_fetch_consistency = 'none'; }
+step s1_fetch_consistency_none { SET stats_fetch_consistency = 'none'; }
+step s1_fetch_consistency_cache { SET stats_fetch_consistency = 'cache'; }
+step s1_fetch_consistency_snapshot { SET stats_fetch_consistency = 'snapshot'; }
+step s1_clear_snapshot { SELECT pg_stat_clear_snapshot(); }
+step s1_begin { BEGIN; }
+step s1_commit { COMMIT; }
+step s1_rollback { ROLLBACK; }
+step s1_prepare_a { PREPARE TRANSACTION 'a'; }
+step s1_commit_prepared_a { COMMIT PREPARED 'a'; }
+step s1_rollback_prepared_a { ROLLBACK PREPARED 'a'; }
+
+# Function stats steps
+step s1_ff { SELECT pg_stat_force_next_flush(); }
+step s1_track_funcs_all { SET track_functions = 'all'; }
+step s1_track_funcs_none { SET track_functions = 'none'; }
+step s1_func_call { SELECT test_stat_func(); }
+step s1_func_drop { DROP FUNCTION test_stat_func(); }
+step s1_func_stats_reset { SELECT pg_stat_reset_single_function_counters('test_stat_func'::regproc); }
+step s1_func_stats_reset_nonexistent { SELECT pg_stat_reset_single_function_counters(12000); }
+step s1_reset { SELECT pg_stat_reset(); }
+step s1_func_stats {
+ SELECT
+ tso.name,
+ pg_stat_get_function_calls(tso.oid),
+ pg_stat_get_function_total_time(tso.oid) > 0 total_above_zero,
+ pg_stat_get_function_self_time(tso.oid) > 0 self_above_zero
+ FROM test_stat_oid AS tso
+ WHERE tso.name = 'test_stat_func'
+}
+step s1_func_stats2 {
+ SELECT
+ tso.name,
+ pg_stat_get_function_calls(tso.oid),
+ pg_stat_get_function_total_time(tso.oid) > 0 total_above_zero,
+ pg_stat_get_function_self_time(tso.oid) > 0 self_above_zero
+ FROM test_stat_oid AS tso
+ WHERE tso.name = 'test_stat_func2'
+}
+step s1_func_stats_nonexistent {
+ SELECT pg_stat_get_function_calls(12000);
+}
+
+# Relation stats steps
+step s1_track_counts_on { SET track_counts = on; }
+step s1_track_counts_off { SET track_counts = off; }
+step s1_table_select { SELECT * FROM test_stat_tab ORDER BY key, value; }
+step s1_table_insert { INSERT INTO test_stat_tab(key, value) VALUES('k1', 1), ('k2', 1), ('k3', 1);}
+step s1_table_insert_k1 { INSERT INTO test_stat_tab(key, value) VALUES('k1', 1);}
+step s1_table_update_k1 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';}
+step s1_table_update_k2 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k2';}
+step s1_table_delete_k1 { DELETE FROM test_stat_tab WHERE key = 'k1';}
+step s1_table_truncate { TRUNCATE test_stat_tab; }
+step s1_table_drop { DROP TABLE test_stat_tab; }
+
+step s1_table_stats {
+ SELECT
+ pg_stat_get_numscans(tso.oid) AS seq_scan,
+ pg_stat_get_tuples_returned(tso.oid) AS seq_tup_read,
+ pg_stat_get_tuples_inserted(tso.oid) AS n_tup_ins,
+ pg_stat_get_tuples_updated(tso.oid) AS n_tup_upd,
+ pg_stat_get_tuples_deleted(tso.oid) AS n_tup_del,
+ pg_stat_get_live_tuples(tso.oid) AS n_live_tup,
+ pg_stat_get_dead_tuples(tso.oid) AS n_dead_tup,
+ pg_stat_get_vacuum_count(tso.oid) AS vacuum_count
+ FROM test_stat_oid AS tso
+ WHERE tso.name = 'test_stat_tab'
+}
+
+# SLRU stats steps
+step s1_slru_save_stats {
+ INSERT INTO test_slru_stats VALUES('Notify', 'blks_zeroed',
+ (SELECT blks_zeroed FROM pg_stat_slru WHERE name = 'Notify'));
+}
+step s1_listen { LISTEN stats_test_nothing; }
+step s1_big_notify { SELECT pg_notify('stats_test_use',
+ repeat(i::text, current_setting('block_size')::int / 2)) FROM generate_series(1, 3) g(i);
+ }
+
+step s1_slru_check_stats {
+ SELECT current.blks_zeroed > before.value
+ FROM test_slru_stats before
+ INNER JOIN pg_stat_slru current
+ ON before.slru = current.name
+ WHERE before.stat = 'blks_zeroed';
+ }
+
+
+session s2
+setup { SET stats_fetch_consistency = 'none'; }
+step s2_begin { BEGIN; }
+step s2_commit { COMMIT; }
+step s2_commit_prepared_a { COMMIT PREPARED 'a'; }
+step s2_rollback_prepared_a { ROLLBACK PREPARED 'a'; }
+step s2_ff { SELECT pg_stat_force_next_flush(); }
+
+# Function stats steps
+step s2_track_funcs_all { SET track_functions = 'all'; }
+step s2_track_funcs_none { SET track_functions = 'none'; }
+step s2_func_call { SELECT test_stat_func() }
+step s2_func_call_ifexists { SELECT test_stat_func_ifexists(); }
+step s2_func_call2 { SELECT test_stat_func2() }
+step s2_func_stats {
+ SELECT
+ tso.name,
+ pg_stat_get_function_calls(tso.oid),
+ pg_stat_get_function_total_time(tso.oid) > 0 total_above_zero,
+ pg_stat_get_function_self_time(tso.oid) > 0 self_above_zero
+ FROM test_stat_oid AS tso
+ WHERE tso.name = 'test_stat_func'
+}
+
+# Relation stats steps
+step s2_table_select { SELECT * FROM test_stat_tab ORDER BY key, value; }
+step s2_table_update_k1 { UPDATE test_stat_tab SET value = value + 1 WHERE key = 'k1';}
+
+# SLRU stats steps
+step s2_big_notify { SELECT pg_notify('stats_test_use',
+ repeat(i::text, current_setting('block_size')::int / 2)) FROM generate_series(1, 3) g(i);
+ }
+
+
+######################
+# Function stats tests
+######################
+
+# check that stats are collected iff enabled
+permutation
+ s1_track_funcs_none s1_func_stats s1_func_call s1_func_call s1_ff s1_func_stats
+permutation
+ s1_track_funcs_all s1_func_stats s1_func_call s1_func_call s1_ff s1_func_stats
+
+# multiple function calls are accurately reported, across separate connections
+permutation
+ s1_track_funcs_all s2_track_funcs_all s1_func_stats s2_func_stats
+ s1_func_call s2_func_call s1_func_call s2_func_call s2_func_call s1_ff s2_ff s1_func_stats s2_func_stats
+permutation
+ s1_track_funcs_all s2_track_funcs_all s1_func_stats s2_func_stats
+ s1_func_call s1_ff s2_func_call s2_func_call s2_ff s1_func_stats s2_func_stats
+permutation
+ s1_track_funcs_all s2_track_funcs_all s1_func_stats s2_func_stats
+ s1_begin s1_func_call s1_func_call s1_commit s1_ff s1_func_stats s2_func_stats
+
+
+### Check interaction between dropping and stats reporting
+
+# Some of these tests try to test behavior in cases where no invalidation
+# processing is triggered. To prevent output changes when
+# debug_discard_caches, CATCACHE_FORCE_RELEASE or RELCACHE_FORCE_RELEASE are
+# used (which trigger invalidation processing in paths that normally don't),
+# test_stat_func_ifexists() can be used, which tries to call test_stat_func(),
+# but doesn't raise an error if the function doesn't exist.
+
+# dropping a function removes stats iff committed
+permutation
+ s1_track_funcs_all s2_track_funcs_all s1_func_stats s2_func_stats
+ s1_begin s1_func_call s2_func_call s1_func_drop s2_func_call s2_ff s2_func_stats s1_commit s1_ff s1_func_stats s2_func_stats
+permutation
+ s1_track_funcs_all s2_track_funcs_all s1_func_stats s2_func_stats
+ s1_begin s1_func_call s2_func_call s1_func_drop s2_func_call s2_ff s2_func_stats s1_rollback s1_ff s1_func_stats s2_func_stats
+
+# Verify that pending stats from before a drop do not lead to
+# reviving stats for a dropped object
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s2_func_call s2_ff # this access increments refcount, preventing the shared entry from being dropped
+ s2_begin s2_func_call_ifexists s1_func_drop s1_func_stats s2_commit s2_ff s1_func_stats s2_func_stats
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s2_begin s2_func_call_ifexists s1_func_drop s1_func_stats s2_commit s2_ff s1_func_stats s2_func_stats
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_func_call s2_begin s2_func_call_ifexists s1_func_drop s2_func_call_ifexists s2_commit s2_ff s1_func_stats s2_func_stats
+
+# Function calls don't necessarily trigger cache invalidation processing. The
+# default handling of dropped stats could therefore end up with stats getting
+# revived by a function call done after stats processing - but
+# pgstat_init_function_usage() protects against that if track_functions is
+# on. Verify that the stats are indeed dropped, and document the behavioral
+# difference between track_functions settings.
+permutation
+ s1_track_funcs_all s2_track_funcs_none
+ s1_func_call s2_begin s2_func_call_ifexists s1_ff s1_func_stats s1_func_drop s2_track_funcs_none s1_func_stats s2_func_call_ifexists s2_commit s2_ff s1_func_stats s2_func_stats
+permutation
+ s1_track_funcs_all s2_track_funcs_none
+ s1_func_call s2_begin s2_func_call_ifexists s1_ff s1_func_stats s1_func_drop s2_track_funcs_all s1_func_stats s2_func_call_ifexists s2_commit s2_ff s1_func_stats s2_func_stats
+
+# test pg_stat_reset_single_function_counters
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_func_call
+ s2_func_call
+ s2_func_call2
+ s1_ff s2_ff
+ s1_func_stats
+ s2_func_call s2_func_call2 s2_ff
+ s1_func_stats s1_func_stats2 s1_func_stats
+ s1_func_stats_reset
+ s1_func_stats s1_func_stats2 s1_func_stats
+
+# test pg_stat_reset_single_function_counters of non-existing function
+permutation
+ s1_func_stats_nonexistent
+ s1_func_stats_reset_nonexistent
+ s1_func_stats_nonexistent
+
+# test pg_stat_reset
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_func_call
+ s2_func_call
+ s2_func_call2
+ s1_ff s2_ff
+ s1_func_stats s1_func_stats2 s1_func_stats
+ s1_reset
+ s1_func_stats s1_func_stats2 s1_func_stats
+
+
+### Check the different snapshot consistency models
+
+# First just some dead-trivial test verifying each model doesn't crash
+permutation
+ s1_track_funcs_all s1_fetch_consistency_none s1_func_call s1_ff s1_func_stats
+permutation
+ s1_track_funcs_all s1_fetch_consistency_cache s1_func_call s1_ff s1_func_stats
+permutation
+ s1_track_funcs_all s1_fetch_consistency_snapshot s1_func_call s1_ff s1_func_stats
+
+# with stats_fetch_consistency=none s1 should see flushed changes in s2, despite being in a transaction
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_fetch_consistency_none
+ s2_func_call s2_ff
+ s1_begin
+ s1_func_stats
+ s2_func_call s2_ff
+ s1_func_stats
+ s1_commit
+
+# with stats_fetch_consistency=cache s1 should not see concurrent
+# changes to the same object after the first access, but a separate
+# object should show changes
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_fetch_consistency_cache
+ s2_func_call s2_func_call2 s2_ff
+ s1_begin
+ s1_func_stats
+ s2_func_call s2_func_call2 s2_ff
+ s1_func_stats s1_func_stats2
+ s1_commit
+
+# with stats_fetch_consistency=snapshot s1 should not see any
+# concurrent changes after the first access
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_fetch_consistency_snapshot
+ s2_func_call s2_func_call2 s2_ff
+ s1_begin
+ s1_func_stats
+ s2_func_call s2_func_call2 s2_ff
+ s1_func_stats s1_func_stats2
+ s1_commit
+
+# Check access to non-existing stats works correctly and repeatedly
+permutation
+ s1_fetch_consistency_none
+ s1_begin
+ s1_func_stats_nonexistent
+ s1_func_stats_nonexistent
+ s1_commit
+permutation
+ s1_fetch_consistency_cache
+ s1_begin
+ s1_func_stats_nonexistent
+ s1_func_stats_nonexistent
+ s1_commit
+permutation
+ s1_fetch_consistency_snapshot
+ s1_begin
+ s1_func_stats_nonexistent
+ s1_func_stats_nonexistent
+ s1_commit
+
+
+### Check 2PC handling of stat drops
+
+# S1 prepared, S1 commits prepared
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_begin
+ s1_func_call
+ s2_func_call
+ s1_func_drop
+ s2_func_call
+ s2_ff
+ s1_prepare_a
+ s2_func_call
+ s2_ff
+ s1_func_call
+ s1_ff
+ s1_func_stats
+ s1_commit_prepared_a
+ s1_func_stats
+
+# S1 prepared, S1 aborts prepared
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_begin
+ s1_func_call
+ s2_func_call
+ s1_func_drop
+ s2_func_call
+ s2_ff
+ s1_prepare_a
+ s2_func_call
+ s2_ff
+ s1_func_call
+ s1_ff
+ s1_func_stats
+ s1_rollback_prepared_a
+ s1_func_stats
+
+# S1 prepares, S2 commits prepared
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_begin
+ s1_func_call
+ s2_func_call
+ s1_func_drop
+ s2_func_call
+ s2_ff
+ s1_prepare_a
+ s2_func_call
+ s2_ff
+ s1_func_call
+ s1_ff
+ s1_func_stats
+ s2_commit_prepared_a
+ s1_func_stats
+
+# S1 prepared, S2 aborts prepared
+permutation
+ s1_track_funcs_all s2_track_funcs_all
+ s1_begin
+ s1_func_call
+ s2_func_call
+ s1_func_drop
+ s2_func_call
+ s2_ff
+ s1_prepare_a
+ s2_func_call
+ s2_ff
+ s1_func_call
+ s1_ff
+ s1_func_stats
+ s2_rollback_prepared_a
+ s1_func_stats
+
+
+######################
+# Table stats tests
+######################
+
+# Most of the stats handling mechanism has already been tested in the function
+# stats tests above - that's cheaper than testing with relations. But
+# particularly for 2PC there are special cases worth testing here.
+
+
+### Verify that pending stats from before a drop do not lead to reviving
+### of stats for a dropped object
+
+permutation
+ s1_table_select
+ s1_table_insert
+ s2_table_select
+ s2_table_update_k1
+ s1_ff
+ s2_table_update_k1
+ s1_table_drop
+ s2_ff
+ s1_table_stats
+
+permutation
+ s1_table_select
+ s1_table_insert
+ s2_table_select
+ s2_table_update_k1
+ s2_table_update_k1
+ s1_table_drop
+ s1_table_stats
+
+
+### Check that we don't count changes with track counts off, but allow access
+### to prior stats
+
+# simple read access with stats off
+permutation
+ s1_track_counts_off
+ s1_table_stats
+ s1_track_counts_on
+
+# simple read access with stats off, previously accessed
+permutation
+ s1_table_select
+ s1_track_counts_off
+ s1_ff
+ s1_table_stats
+ s1_track_counts_on
+permutation
+ s1_table_select
+ s1_ff
+ s1_track_counts_off
+ s1_table_stats
+ s1_track_counts_on
+
+# ensure we don't count anything with stats off
+permutation
+ s1_track_counts_off
+ s1_table_select
+ s1_table_insert_k1
+ s1_table_update_k1
+ s2_table_select
+ s1_track_counts_on
+ s1_ff s2_ff
+ s1_table_stats
+ # but can count again after
+ s1_table_select
+ s1_table_update_k1
+ s1_ff
+ s1_table_stats
+permutation
+ s1_table_select
+ s1_table_insert_k1
+ s1_table_delete_k1
+ s1_track_counts_off
+ s1_table_select
+ s1_table_insert_k1
+ s1_table_update_k1
+ s2_table_select
+ s1_track_counts_on
+ s1_ff s2_ff
+ s1_table_stats
+ s1_table_select
+ s1_table_update_k1
+ s1_ff
+ s1_table_stats
+
+
+### 2PC: transactional and non-transactional counters work correctly
+
+# S1 prepares, S1 commits prepared
+permutation
+ s1_begin
+ s1_table_insert s1_table_update_k1 s1_table_update_k1 s1_table_update_k2 s1_table_update_k2 s1_table_update_k2 s1_table_delete_k1
+ s1_table_select
+ s1_prepare_a
+ s1_table_select
+ s1_commit_prepared_a
+ s1_table_select
+ s1_ff
+ s1_table_stats
+
+# S1 prepares, S2 commits prepared
+permutation
+ s1_begin
+ s1_table_insert s1_table_update_k1 s1_table_update_k1 s1_table_update_k2 s1_table_update_k2 s1_table_update_k2 s1_table_delete_k1
+ s1_table_select
+ s1_prepare_a
+ s1_table_select
+ s2_commit_prepared_a
+ s1_table_select
+ s1_ff s2_ff
+ s1_table_stats
+
+# S1 prepares, S1 aborts prepared
+permutation
+ s1_begin
+ s1_table_insert s1_table_update_k1 s1_table_update_k1 s1_table_update_k2 s1_table_update_k2 s1_table_update_k2 s1_table_delete_k1
+ s1_table_select
+ s1_prepare_a
+ s1_table_select
+ s1_rollback_prepared_a
+ s1_table_select
+ s1_ff
+ s1_table_stats
+
+# S1 prepares, S2 aborts prepared
+permutation
+ s1_begin
+ s1_table_insert s1_table_update_k1 s1_table_update_k1 s1_table_update_k2 s1_table_update_k2 s1_table_update_k2 s1_table_delete_k1
+ s1_table_select
+ s1_prepare_a
+ s1_table_select
+ s2_rollback_prepared_a
+ s1_table_select
+ s1_ff s2_ff
+ s1_table_stats
+
+
+### 2PC: truncate handling
+
+# S1 prepares, S1 commits prepared
+permutation
+ s1_table_insert
+ s1_begin
+ s1_table_update_k1 # should *not* be counted, different rel
+ s1_table_update_k1 # ditto
+ s1_table_truncate
+ s1_table_insert_k1 # should be counted
+ s1_table_update_k1 # ditto
+ s1_prepare_a
+ s1_commit_prepared_a
+ s1_ff
+ s1_table_stats
+
+# S1 prepares, S2 commits prepared
+permutation
+ s1_table_insert
+ s1_begin
+ s1_table_update_k1 # should *not* be counted, different rel
+ s1_table_update_k1 # ditto
+ s1_table_truncate
+ s1_table_insert_k1 # should be counted
+ s1_table_update_k1 # ditto
+ s1_prepare_a
+ s1_ff # flush out non-transactional stats, might happen anyway
+ s2_commit_prepared_a
+ s2_ff
+ s1_table_stats
+
+# S1 prepares, S1 aborts prepared
+permutation
+ s1_table_insert
+ s1_begin
+ s1_table_update_k1 # should be counted
+ s1_table_update_k1 # ditto
+ s1_table_truncate
+ s1_table_insert_k1 # should *not* be counted, different rel
+ s1_table_update_k1 # ditto
+ s1_prepare_a
+ s1_rollback_prepared_a
+ s1_ff
+ s1_table_stats
+
+# S1 prepares, S2 aborts prepared
+permutation
+ s1_table_insert
+ s1_begin
+ s1_table_update_k1 # should be counted
+ s1_table_update_k1 # ditto
+ s1_table_truncate
+ s1_table_insert_k1 # should *not* be counted, different rel
+ s1_table_update_k1 # ditto
+ s1_prepare_a
+ s2_rollback_prepared_a
+ s1_ff s2_ff
+ s1_table_stats
+
+
+### 2PC: rolled back drop maintains live / dead counters
+
+# S1 prepares, S1 aborts prepared
+permutation
+ s1_table_insert
+ s1_table_update_k1
+ s1_begin
+ # should all be counted
+ s1_table_delete_k1
+ s1_table_insert_k1
+ s1_table_update_k1
+ s1_table_update_k1
+ s1_table_drop
+ s1_prepare_a
+ s1_rollback_prepared_a
+ s1_ff
+ s1_table_stats
+
+# S1 prepares, S2 aborts prepared
+permutation
+ s1_table_insert
+ s1_table_update_k1
+ s1_begin
+ # should all be counted
+ s1_table_delete_k1
+ s1_table_insert_k1
+ s1_table_update_k1
+ s1_table_update_k1
+ s1_table_drop
+ s1_prepare_a
+ s2_rollback_prepared_a
+ s1_ff s2_ff
+ s1_table_stats
+
+
+######################
+# SLRU stats tests
+######################
+
+# Verify SLRU stats generated in own transaction
+permutation
+ s1_slru_save_stats
+ s1_listen
+ s1_begin
+ s1_big_notify
+ s1_ff
+ s1_slru_check_stats
+ s1_commit
+ s1_slru_check_stats
+
+# Verify SLRU stats generated in separate transaction
+permutation
+ s1_slru_save_stats
+ s1_listen
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+
+# shouldn't see stats yet, not committed
+permutation
+ s1_slru_save_stats
+ s1_listen
+ s2_begin
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s2_commit
+
+
+### Check the different snapshot consistency models for fixed-amount statistics
+
+permutation
+ s1_fetch_consistency_none
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_slru_check_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_commit
+ s1_slru_check_stats
+permutation
+ s1_fetch_consistency_cache
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_slru_check_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_commit
+ s1_slru_check_stats
+permutation
+ s1_fetch_consistency_snapshot
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_slru_check_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_commit
+ s1_slru_check_stats
+
+# check that pg_stat_clear_snapshot() discards the statistics snapshot
+permutation
+ s1_fetch_consistency_none
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_slru_check_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_clear_snapshot
+ s1_slru_check_stats
+ s1_commit
+permutation
+ s1_fetch_consistency_cache
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_slru_check_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_clear_snapshot
+ s1_slru_check_stats
+ s1_commit
+permutation
+ s1_fetch_consistency_snapshot
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_slru_check_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_clear_snapshot
+ s1_slru_check_stats
+ s1_commit
+
+# check that a variable-amount stats access caches fixed-amount stats too
+permutation
+ s1_fetch_consistency_snapshot
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s1_func_stats
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s1_commit
+
+# and the other way round
+permutation
+ s1_fetch_consistency_snapshot
+ s1_slru_save_stats s1_listen
+ s1_begin
+ s2_big_notify
+ s2_ff
+ s1_slru_check_stats
+ s2_func_call
+ s2_ff
+ s1_func_stats
+ s1_clear_snapshot
+ s1_func_stats
+ s1_commit
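
The stats_fetch_consistency modes exercised above can also be observed by hand; a minimal psql sketch (assuming PostgreSQL 15 or later, where this GUC, pg_stat_slru and pg_stat_clear_snapshot() are available):

    SET stats_fetch_consistency = snapshot;
    BEGIN;
    SELECT sum(blks_zeroed) FROM pg_stat_slru;  -- first access builds the stats snapshot
    -- concurrent activity (e.g. a large NOTIFY elsewhere) stays invisible ...
    SELECT pg_stat_clear_snapshot();            -- ... until the snapshot is discarded
    SELECT sum(blks_zeroed) FROM pg_stat_slru;  -- re-fetched, may now include newer activity
    COMMIT;

With fetch consistency 'none' every access re-reads shared memory, while 'cache' keeps each already-fetched entry stable for the rest of the transaction without building a full snapshot.
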
diff --git a/src/test/isolation/specs/subxid-overflow.spec b/src/test/isolation/specs/subxid-overflow.spec
new file mode 100644
index 0000000..9a69db4
--- /dev/null
+++ b/src/test/isolation/specs/subxid-overflow.spec
@@ -0,0 +1,79 @@
+# Subtransaction overflow
+#
+# This test is designed to cover some code paths which only occur when
+# one transaction has overflowed the subtransaction cache.
+
+setup
+{
+DROP TABLE IF EXISTS subxids;
+CREATE TABLE subxids (subx integer, val integer);
+
+CREATE OR REPLACE FUNCTION gen_subxids (n integer)
+ RETURNS VOID
+ LANGUAGE plpgsql
+AS $$
+BEGIN
+ IF n <= 0 THEN
+ UPDATE subxids SET val = 1 WHERE subx = 0;
+ RETURN;
+ ELSE
+ PERFORM gen_subxids(n - 1);
+ RETURN;
+ END IF;
+EXCEPTION /* generates a subxid */
+ WHEN raise_exception THEN NULL;
+END;
+$$;
+}
+
+teardown
+{
+ DROP TABLE subxids;
+ DROP FUNCTION gen_subxids(integer);
+}
+
+session s1
+# setup step for each test
+step ins { TRUNCATE subxids; INSERT INTO subxids VALUES (0, 0); }
+# long running transaction with overflowed subxids
+step subxov { BEGIN; SELECT gen_subxids(100); }
+# commit should always come last to make this long running
+step s1c { COMMIT; }
+
+session s2
+# move xmax forwards
+step xmax { BEGIN; INSERT INTO subxids VALUES (99, 0); COMMIT;}
+
+# step for test1
+step s2sel { SELECT val FROM subxids WHERE subx = 0; }
+
+# steps for test2
+step s2brr { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s2brc { BEGIN ISOLATION LEVEL READ COMMITTED; }
+# look for data written by sub3
+step s2s3 { SELECT val FROM subxids WHERE subx = 1; }
+step s2c { COMMIT; }
+
+# step for test3
+step s2upd { UPDATE subxids SET val = 1 WHERE subx = 0; }
+
+session s3
+# transaction with subxids that can commit before s1c
+step sub3 { BEGIN; SAVEPOINT s; INSERT INTO subxids VALUES (1, 0); }
+step s3c { COMMIT; }
+
+# test1
+# s2sel will see subxid as still running
+# designed to test XidInMVCCSnapshot() when the snapshot has overflowed and the xid is found
+permutation ins subxov xmax s2sel s1c
+
+# test2
+# designed to test XidInMVCCSnapshot() when the snapshot has overflowed and the xid is not found
+# both SELECTs invisible
+permutation ins subxov sub3 xmax s2brr s2s3 s3c s2s3 s2c s1c
+# 2nd SELECT visible after commit
+permutation ins subxov sub3 xmax s2brc s2s3 s3c s2s3 s2c s1c
+
+# test3
+# designed to test XactLockTableWait() for overflows
+permutation ins subxov xmax s2upd s1c
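
Each EXCEPTION block entered by gen_subxids() runs in its own subtransaction, and the UPDATE at the bottom of the recursion forces XID assignment all the way up the chain; once a backend has more than PGPROC_MAX_CACHED_SUBXIDS (64 in standard builds) assigned subtransaction XIDs, its shared subxid cache is marked overflowed and other backends must resolve those XIDs via pg_subtrans. The same state can be produced by hand; a sketch using an invented scratch table:

    BEGIN;
    CREATE TEMP TABLE subxid_demo (i int);       -- illustrative scratch table
    DO $$
    BEGIN
      FOR i IN 1..100 LOOP
        BEGIN
          INSERT INTO subxid_demo VALUES (i);    -- the write assigns this subxact its own XID
        EXCEPTION WHEN OTHERS THEN NULL;         -- EXCEPTION clause => one subtransaction per iteration
        END;
      END LOOP;
    END$$;
    -- Concurrent snapshots now see this backend as overflowed and take the slower
    -- XidInMVCCSnapshot()/pg_subtrans path the permutations above exercise.
    COMMIT;
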
diff --git a/src/test/isolation/specs/temp-schema-cleanup.spec b/src/test/isolation/specs/temp-schema-cleanup.spec
new file mode 100644
index 0000000..a9417b7
--- /dev/null
+++ b/src/test/isolation/specs/temp-schema-cleanup.spec
@@ -0,0 +1,85 @@
+# Test cleanup of objects in temporary schema.
+
+setup {
+ CREATE TABLE s1_temp_schema(oid oid);
+ -- to help create a long function
+ CREATE FUNCTION exec(p_foo text) RETURNS void LANGUAGE plpgsql AS $$BEGIN EXECUTE p_foo; END;$$;
+}
+
+teardown {
+ DROP TABLE s1_temp_schema;
+ DROP FUNCTION exec(text);
+}
+
+session "s1"
+setup {
+ CREATE TEMPORARY TABLE just_to_create_temp_schema();
+ DROP TABLE just_to_create_temp_schema;
+ INSERT INTO s1_temp_schema SELECT pg_my_temp_schema();
+}
+
+step s1_advisory {
+ SELECT pg_advisory_lock('pg_namespace'::regclass::int8);
+}
+
+step s1_create_temp_objects {
+
+  -- Create a function large enough to be toasted, to ensure such entries are
+  -- cleaned up correctly; this was broken by a prior bug, see
+ -- https://postgr.es/m/CAOFAq3BU5Mf2TTvu8D9n_ZOoFAeQswuzk7yziAb7xuw_qyw5gw%40mail.gmail.com
+ SELECT exec(format($outer$
+ CREATE OR REPLACE FUNCTION pg_temp.long() RETURNS text LANGUAGE sql AS $body$ SELECT %L; $body$$outer$,
+ (SELECT string_agg(g.i::text||':'||random()::text, '|') FROM generate_series(1, 100) g(i))));
+
+  -- The above bug requires function removal to happen after a catalog
+  -- invalidation.  dependency.c sorts objects in descending OID order so
+  -- that newer objects are deleted before older ones, so create a
+  -- table afterwards.
+ CREATE TEMPORARY TABLE invalidate_catalog_cache();
+
+  -- test that a non-temporary function is dropped when it depends on a temporary table
+ CREATE TEMPORARY TABLE just_give_me_a_type(id serial primary key);
+
+ CREATE FUNCTION uses_a_temp_type(just_give_me_a_type) RETURNS int LANGUAGE sql AS $$SELECT 1;$$;
+}
+
+step s1_discard_temp {
+ DISCARD TEMP;
+}
+
+step s1_exit {
+ SELECT pg_terminate_backend(pg_backend_pid());
+}
+
+
+session "s2"
+
+step s2_advisory {
+ SELECT pg_advisory_lock('pg_namespace'::regclass::int8);
+}
+
+step s2_check_schema {
+ SELECT oid::regclass FROM pg_class WHERE relnamespace = (SELECT oid FROM s1_temp_schema);
+ SELECT oid::regproc FROM pg_proc WHERE pronamespace = (SELECT oid FROM s1_temp_schema);
+  SELECT oid::regtype FROM pg_type WHERE typnamespace = (SELECT oid FROM s1_temp_schema);
+}
+
+
+# Test temporary object cleanup during DISCARD.
+permutation
+ s1_create_temp_objects
+ s1_discard_temp
+ s2_check_schema
+
+# Test temporary object cleanup during process exit.
+#
+# To check (in s2) if temporary objects (in s1) have properly been removed we
+# need to wait for s1 to finish cleaning up. Luckily session level advisory
+# locks are released only after temp table cleanup.
+permutation
+ s1_advisory
+ s2_advisory
+ s1_create_temp_objects
+ s1_exit
+ s2_check_schema
+
+# Can't run further tests here, because s1's connection is dead
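
The same kind of verification can be done interactively against one's own session; a small sketch (table name invented for illustration):

    CREATE TEMPORARY TABLE temp_cleanup_demo (id int);
    SELECT pg_my_temp_schema()::regnamespace;          -- e.g. pg_temp_3
    SELECT oid::regclass FROM pg_class
      WHERE relnamespace = pg_my_temp_schema();        -- lists temp_cleanup_demo
    DISCARD TEMP;
    SELECT oid::regclass FROM pg_class
      WHERE relnamespace = pg_my_temp_schema();        -- now returns no rows

The spec instead records pg_my_temp_schema() in a regular table so that a second session can run the check after s1 has exited, using the advisory lock to know when cleanup has finished.
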
diff --git a/src/test/isolation/specs/temporal-range-integrity.spec b/src/test/isolation/specs/temporal-range-integrity.spec
new file mode 100644
index 0000000..2d4c59c
--- /dev/null
+++ b/src/test/isolation/specs/temporal-range-integrity.spec
@@ -0,0 +1,38 @@
+# Temporal Range Integrity test
+#
+# Snapshot integrity fails with simple referential integrity tests,
+# but those don't make for good demonstrations because people just
+# say that foreign key definitions should be used instead. There
+# are many integrity tests which are conceptually very similar but
+# don't have built-in support which will fail when used in triggers.
+# This is intended to illustrate such cases. It is obviously very
+# hard to exercise all these permutations when the code is actually
+# in a trigger; this test pulls what would normally be inside of
+# triggers out to the top level to control the permutations.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+
+setup
+{
+ CREATE TABLE statute (statute_cite text NOT NULL, eff_date date NOT NULL, exp_date date, CONSTRAINT statute_pkey PRIMARY KEY (statute_cite, eff_date));
+ INSERT INTO statute VALUES ('123.45(1)a', DATE '2008-01-01', NULL);
+ CREATE TABLE offense (offense_no int NOT NULL, statute_cite text NOT NULL, offense_date date NOT NULL, CONSTRAINT offense_pkey PRIMARY KEY (offense_no));
+}
+
+teardown
+{
+ DROP TABLE statute, offense;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rx1 { SELECT count(*) FROM statute WHERE statute_cite = '123.45(1)a' AND eff_date <= DATE '2009-05-15' AND (exp_date IS NULL OR exp_date > DATE '2009-05-15'); }
+step wy1 { INSERT INTO offense VALUES (1, '123.45(1)a', DATE '2009-05-15'); }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step ry2 { SELECT count(*) FROM offense WHERE statute_cite = '123.45(1)a' AND offense_date >= DATE '2008-01-01'; }
+step wx2 { DELETE FROM statute WHERE statute_cite = '123.45(1)a' AND eff_date = DATE '2008-01-01'; }
+step c2 { COMMIT; }
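
In application form, rx1/wy1 correspond roughly to a constraint enforced by a trigger such as the following sketch (names invented here, not part of the test), which is only safe against the interleavings above when every participating transaction runs at SERIALIZABLE:

    CREATE FUNCTION offense_check_statute() RETURNS trigger
    LANGUAGE plpgsql AS $$
    BEGIN
        IF NOT EXISTS (SELECT 1 FROM statute
                       WHERE statute_cite = NEW.statute_cite
                         AND eff_date <= NEW.offense_date
                         AND (exp_date IS NULL OR exp_date > NEW.offense_date)) THEN
            RAISE EXCEPTION 'no statute in force for % on %',
                NEW.statute_cite, NEW.offense_date;
        END IF;
        RETURN NEW;
    END;
    $$;

    CREATE TRIGGER offense_check_statute BEFORE INSERT OR UPDATE ON offense
        FOR EACH ROW EXECUTE FUNCTION offense_check_statute();
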
diff --git a/src/test/isolation/specs/timeouts.spec b/src/test/isolation/specs/timeouts.spec
new file mode 100644
index 0000000..c747b4a
--- /dev/null
+++ b/src/test/isolation/specs/timeouts.spec
@@ -0,0 +1,49 @@
+# Simple tests for statement_timeout and lock_timeout features
+
+setup
+{
+ CREATE TABLE accounts (accountid text PRIMARY KEY, balance numeric not null);
+ INSERT INTO accounts VALUES ('checking', 600), ('savings', 600);
+}
+
+teardown
+{
+ DROP TABLE accounts;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step rdtbl { SELECT * FROM accounts; }
+step wrtbl { UPDATE accounts SET balance = balance + 100; }
+teardown { ABORT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL READ COMMITTED; }
+step sto { SET statement_timeout = '10ms'; }
+step lto { SET lock_timeout = '10ms'; }
+step lsto { SET lock_timeout = '10ms'; SET statement_timeout = '10s'; }
+step slto { SET lock_timeout = '10s'; SET statement_timeout = '10ms'; }
+step locktbl { LOCK TABLE accounts; }
+step update { DELETE FROM accounts WHERE accountid = 'checking'; }
+teardown { ABORT; }
+
+# It's possible that the isolation tester will not observe the final
+# steps as "waiting", thanks to the relatively short timeouts we use.
+# We can ensure consistent test output by marking those steps with (*).
+
+# statement timeout, table-level lock
+permutation rdtbl sto locktbl(*)
+# lock timeout, table-level lock
+permutation rdtbl lto locktbl(*)
+# lock timeout expires first, table-level lock
+permutation rdtbl lsto locktbl(*)
+# statement timeout expires first, table-level lock
+permutation rdtbl slto locktbl(*)
+# statement timeout, row-level lock
+permutation wrtbl sto update(*)
+# lock timeout, row-level lock
+permutation wrtbl lto update(*)
+# lock timeout expires first, row-level lock
+permutation wrtbl lsto update(*)
+# statement timeout expires first, row-level lock
+permutation wrtbl slto update(*)
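
The lsto/slto permutations only differ in which timer fires first; the same behavior can be reproduced directly against the accounts table, for example:

    -- session A: hold row locks
    BEGIN;
    SELECT * FROM accounts FOR UPDATE;

    -- session B: lock_timeout bounds only the wait for a lock ...
    SET lock_timeout = '10ms';
    SET statement_timeout = '10s';
    UPDATE accounts SET balance = balance + 100;   -- cancelled after ~10ms of lock waiting

    -- ... while statement_timeout bounds the whole statement, lock wait included
    SET lock_timeout = '10s';
    SET statement_timeout = '10ms';
    UPDATE accounts SET balance = balance + 100;   -- cancelled by the statement timeout instead
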
diff --git a/src/test/isolation/specs/total-cash.spec b/src/test/isolation/specs/total-cash.spec
new file mode 100644
index 0000000..d98121a
--- /dev/null
+++ b/src/test/isolation/specs/total-cash.spec
@@ -0,0 +1,28 @@
+# Total Cash test
+#
+# Another famous test of snapshot isolation anomaly.
+#
+# Any overlap between the transactions must cause a serialization failure.
+
+setup
+{
+ CREATE TABLE accounts (accountid text NOT NULL PRIMARY KEY, balance numeric not null);
+ INSERT INTO accounts VALUES ('checking', 600),('savings',600);
+}
+
+teardown
+{
+ DROP TABLE accounts;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wx1 { UPDATE accounts SET balance = balance - 200 WHERE accountid = 'checking'; }
+step rxy1 { SELECT SUM(balance) FROM accounts; }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wy2 { UPDATE accounts SET balance = balance - 200 WHERE accountid = 'savings'; }
+step rxy2 { SELECT SUM(balance) FROM accounts; }
+step c2 { COMMIT; }
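
The anomaly guarded against here is classic write skew: under plain snapshot isolation both withdrawals commit even though each session's own check reported a total of 1000, leaving only 800 behind; SERIALIZABLE must instead roll one transaction back. Spelled out with the schema above:

    -- session 1
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    UPDATE accounts SET balance = balance - 200 WHERE accountid = 'checking';
    -- session 2
    BEGIN ISOLATION LEVEL SERIALIZABLE;
    UPDATE accounts SET balance = balance - 200 WHERE accountid = 'savings';
    -- session 1
    SELECT sum(balance) FROM accounts;   -- reports 1000
    COMMIT;
    -- session 2
    SELECT sum(balance) FROM accounts;   -- under SERIALIZABLE this query or the
    COMMIT;                              -- commit fails with SQLSTATE 40001; under
                                         -- REPEATABLE READ it would report 1000 and
                                         -- commit, leaving an unexplained total of 800
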
diff --git a/src/test/isolation/specs/truncate-conflict.spec b/src/test/isolation/specs/truncate-conflict.spec
new file mode 100644
index 0000000..0f77ff0
--- /dev/null
+++ b/src/test/isolation/specs/truncate-conflict.spec
@@ -0,0 +1,38 @@
+# Tests for locking conflicts with TRUNCATE commands.
+
+setup
+{
+ CREATE ROLE regress_truncate_conflict;
+ CREATE TABLE truncate_tab (a int);
+}
+
+teardown
+{
+ DROP TABLE truncate_tab;
+ DROP ROLE regress_truncate_conflict;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_tab_lookup { SELECT count(*) >= 0 FROM truncate_tab; }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_grant { GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict; }
+step s2_auth { SET ROLE regress_truncate_conflict; }
+step s2_truncate { TRUNCATE truncate_tab; }
+step s2_reset { RESET ROLE; }
+
+# The role doesn't have privileges to truncate the table, so TRUNCATE should
+# immediately fail without waiting for a lock.
+permutation s1_begin s1_tab_lookup s2_auth s2_truncate s1_commit s2_reset
+permutation s1_begin s2_auth s2_truncate s1_tab_lookup s1_commit s2_reset
+permutation s1_begin s2_auth s1_tab_lookup s2_truncate s1_commit s2_reset
+permutation s2_auth s2_truncate s1_begin s1_tab_lookup s1_commit s2_reset
+
+# The role has privileges to truncate the table, so TRUNCATE will block if
+# another session holds a lock on the table, and will succeed in all cases.
+permutation s1_begin s1_tab_lookup s2_grant s2_auth s2_truncate s1_commit s2_reset
+permutation s1_begin s2_grant s2_auth s2_truncate s1_tab_lookup s1_commit s2_reset
+permutation s1_begin s2_grant s2_auth s1_tab_lookup s2_truncate s1_commit s2_reset
+permutation s2_grant s2_auth s2_truncate s1_begin s1_tab_lookup s1_commit s2_reset
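
The unprivileged permutations rely on TRUNCATE checking permissions before it tries to take its ACCESS EXCLUSIVE lock, so a denied role errors out even while another session holds the table open; roughly:

    SET ROLE regress_truncate_conflict;
    TRUNCATE truncate_tab;     -- permission denied immediately, no lock wait
    RESET ROLE;

    GRANT TRUNCATE ON truncate_tab TO regress_truncate_conflict;
    SET ROLE regress_truncate_conflict;
    TRUNCATE truncate_tab;     -- now waits for any existing lock holders, then succeeds
    RESET ROLE;
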
diff --git a/src/test/isolation/specs/tuplelock-conflict.spec b/src/test/isolation/specs/tuplelock-conflict.spec
new file mode 100644
index 0000000..8558230
--- /dev/null
+++ b/src/test/isolation/specs/tuplelock-conflict.spec
@@ -0,0 +1,63 @@
+# Here we verify that tuple lock levels conform to their documented
+# conflict tables.
+
+setup {
+ DROP TABLE IF EXISTS multixact_conflict;
+ CREATE TABLE multixact_conflict (a int primary key);
+ INSERT INTO multixact_conflict VALUES (1);
+}
+
+teardown {
+ DROP TABLE multixact_conflict;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_lcksvpt { SELECT * FROM multixact_conflict FOR KEY SHARE; SAVEPOINT foo; }
+step s1_tuplock1 { SELECT * FROM multixact_conflict FOR KEY SHARE; }
+step s1_tuplock2 { SELECT * FROM multixact_conflict FOR SHARE; }
+step s1_tuplock3 { SELECT * FROM multixact_conflict FOR NO KEY UPDATE; }
+step s1_tuplock4 { SELECT * FROM multixact_conflict FOR UPDATE; }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_tuplock1 { SELECT * FROM multixact_conflict FOR KEY SHARE; }
+step s2_tuplock2 { SELECT * FROM multixact_conflict FOR SHARE; }
+step s2_tuplock3 { SELECT * FROM multixact_conflict FOR NO KEY UPDATE; }
+step s2_tuplock4 { SELECT * FROM multixact_conflict FOR UPDATE; }
+
+# The version with savepoints tests the multixact cases
+permutation s1_begin s1_lcksvpt s1_tuplock1 s2_tuplock1 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock1 s2_tuplock2 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock1 s2_tuplock3 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock1 s2_tuplock4 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock2 s2_tuplock1 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock2 s2_tuplock2 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock2 s2_tuplock3 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock2 s2_tuplock4 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock3 s2_tuplock1 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock3 s2_tuplock2 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock3 s2_tuplock3 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock3 s2_tuplock4 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock4 s2_tuplock1 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock4 s2_tuplock2 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock4 s2_tuplock3 s1_commit
+permutation s1_begin s1_lcksvpt s1_tuplock4 s2_tuplock4 s1_commit
+
+# no multixacts here
+permutation s1_begin s1_tuplock1 s2_tuplock1 s1_commit
+permutation s1_begin s1_tuplock1 s2_tuplock2 s1_commit
+permutation s1_begin s1_tuplock1 s2_tuplock3 s1_commit
+permutation s1_begin s1_tuplock1 s2_tuplock4 s1_commit
+permutation s1_begin s1_tuplock2 s2_tuplock1 s1_commit
+permutation s1_begin s1_tuplock2 s2_tuplock2 s1_commit
+permutation s1_begin s1_tuplock2 s2_tuplock3 s1_commit
+permutation s1_begin s1_tuplock2 s2_tuplock4 s1_commit
+permutation s1_begin s1_tuplock3 s2_tuplock1 s1_commit
+permutation s1_begin s1_tuplock3 s2_tuplock2 s1_commit
+permutation s1_begin s1_tuplock3 s2_tuplock3 s1_commit
+permutation s1_begin s1_tuplock3 s2_tuplock4 s1_commit
+permutation s1_begin s1_tuplock4 s2_tuplock1 s1_commit
+permutation s1_begin s1_tuplock4 s2_tuplock2 s1_commit
+permutation s1_begin s1_tuplock4 s2_tuplock3 s1_commit
+permutation s1_begin s1_tuplock4 s2_tuplock4 s1_commit
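
For reference, s1_tuplock1 through s1_tuplock4 correspond to FOR KEY SHARE, FOR SHARE, FOR NO KEY UPDATE and FOR UPDATE, and the documented row-level lock conflict matrix these permutations are checked against is (X = the s2 step blocks until s1 commits):

    requested \ held     KEY SHARE   SHARE   NO KEY UPDATE   UPDATE
    FOR KEY SHARE            -         -           -            X
    FOR SHARE                -         -           X            X
    FOR NO KEY UPDATE        -         X           X            X
    FOR UPDATE               X         X           X            X
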
diff --git a/src/test/isolation/specs/tuplelock-partition.spec b/src/test/isolation/specs/tuplelock-partition.spec
new file mode 100644
index 0000000..c267b28
--- /dev/null
+++ b/src/test/isolation/specs/tuplelock-partition.spec
@@ -0,0 +1,32 @@
+# Test tuple locking on INSERT ON CONFLICT UPDATE on a partitioned table.
+
+setup
+{
+ DROP TABLE IF EXISTS parttab;
+ CREATE TABLE parttab (col1 text, key INTEGER PRIMARY KEY, col2 text) PARTITION BY LIST (key);
+ CREATE TABLE parttab1 (key INTEGER PRIMARY KEY, col1 text, col2 text);
+ CREATE TABLE parttab2 (key INTEGER PRIMARY KEY, col1 text, col2 text);
+ ALTER TABLE parttab ATTACH PARTITION parttab1 FOR VALUES IN (1);
+ ALTER TABLE parttab ATTACH PARTITION parttab2 FOR VALUES IN (2);
+ INSERT INTO parttab (key, col1, col2) VALUES (1, 'a', 'b');
+}
+
+teardown
+{
+ DROP TABLE parttab;
+}
+
+session s1
+step s1b { BEGIN; }
+step s1update_nokey { INSERT INTO parttab (key, col1, col2) VALUES (1, 'a', 'b') ON CONFLICT (key) DO UPDATE SET col1 = 'x', col2 = 'y'; }
+step s1update_key { INSERT INTO parttab (key, col1, col2) VALUES (1, 'a', 'b') ON CONFLICT (key) DO UPDATE SET key=1; }
+step s1c { COMMIT; }
+
+session s2
+step s2locktuple { SELECT * FROM parttab FOR KEY SHARE; }
+
+# INSERT ON CONFLICT UPDATE, performs an UPDATE on non-key columns
+permutation s1b s1update_nokey s2locktuple s1c
+
+# INSERT ON CONFLICT UPDATE, performs an UPDATE on key column
+permutation s1b s1update_key s2locktuple s1c
diff --git a/src/test/isolation/specs/tuplelock-update.spec b/src/test/isolation/specs/tuplelock-update.spec
new file mode 100644
index 0000000..4b940bc
--- /dev/null
+++ b/src/test/isolation/specs/tuplelock-update.spec
@@ -0,0 +1,37 @@
+setup {
+ DROP TABLE IF EXISTS pktab;
+ CREATE TABLE pktab (id int PRIMARY KEY, data SERIAL NOT NULL);
+ INSERT INTO pktab VALUES (1, DEFAULT);
+}
+
+teardown {
+ DROP TABLE pktab;
+}
+
+session s1
+step s1_advlock {
+ SELECT pg_advisory_lock(142857),
+ pg_advisory_lock(285714),
+ pg_advisory_lock(571428);
+ }
+step s1_chain { UPDATE pktab SET data = DEFAULT; }
+step s1_begin { BEGIN; }
+step s1_grablock { SELECT * FROM pktab FOR KEY SHARE; }
+step s1_advunlock1 { SELECT pg_advisory_unlock(142857); }
+step s1_advunlock2 { SELECT pg_advisory_unlock(285714); }
+step s1_advunlock3 { SELECT pg_advisory_unlock(571428); }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_update { UPDATE pktab SET data = DEFAULT WHERE pg_advisory_lock_shared(142857) IS NOT NULL; }
+
+session s3
+step s3_update { UPDATE pktab SET data = DEFAULT WHERE pg_advisory_lock_shared(285714) IS NOT NULL; }
+
+session s4
+step s4_update { UPDATE pktab SET data = DEFAULT WHERE pg_advisory_lock_shared(571428) IS NOT NULL; }
+
+# We use blocker annotations on the s1_advunlockN steps so that we will not
+# move on to the next step until the other session's released step finishes.
+# This ensures stable ordering of the test output.
+permutation s1_advlock s2_update s3_update s4_update s1_chain s1_begin s1_grablock s1_advunlock1(s2_update) s1_advunlock2(s3_update) s1_advunlock3(s4_update) s1_commit
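
The advisory-lock handshake is a general trick for lining waiters up in a known order; reduced to a single worker it looks like this:

    -- controller session: take the gate before the worker starts
    SELECT pg_advisory_lock(142857);

    -- worker session: parks on the shared-mode request, then does its real work
    UPDATE pktab SET data = DEFAULT
        WHERE pg_advisory_lock_shared(142857) IS NOT NULL;

    -- controller session: releasing the gate lets exactly this worker proceed,
    -- and the (s2_update)-style annotations above make the tester wait for it
    SELECT pg_advisory_unlock(142857);
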
diff --git a/src/test/isolation/specs/tuplelock-upgrade-no-deadlock.spec b/src/test/isolation/specs/tuplelock-upgrade-no-deadlock.spec
new file mode 100644
index 0000000..6221a27
--- /dev/null
+++ b/src/test/isolation/specs/tuplelock-upgrade-no-deadlock.spec
@@ -0,0 +1,69 @@
+# This test checks that multiple sessions locking a single row in a table
+# do not deadlock each other when one of them upgrades its existing lock
+# while the others are waiting for it.
+
+setup
+{
+ drop table if exists tlu_job;
+ create table tlu_job (id integer primary key, name text);
+
+ insert into tlu_job values(1, 'a');
+}
+
+
+teardown
+{
+ drop table tlu_job;
+}
+
+session s0
+step s0_begin { begin; }
+step s0_keyshare { select id from tlu_job where id = 1 for key share;}
+step s0_rollback { rollback; }
+
+session s1
+setup { begin; }
+step s1_keyshare { select id from tlu_job where id = 1 for key share;}
+step s1_share { select id from tlu_job where id = 1 for share; }
+step s1_fornokeyupd { select id from tlu_job where id = 1 for no key update; }
+step s1_update { update tlu_job set name = 'b' where id = 1; }
+step s1_savept_e { savepoint s1_e; }
+step s1_savept_f { savepoint s1_f; }
+step s1_rollback_e { rollback to s1_e; }
+step s1_rollback_f { rollback to s1_f; }
+step s1_rollback { rollback; }
+step s1_commit { commit; }
+
+session s2
+setup { begin; }
+step s2_for_keyshare { select id from tlu_job where id = 1 for key share; }
+step s2_fornokeyupd { select id from tlu_job where id = 1 for no key update; }
+step s2_for_update { select id from tlu_job where id = 1 for update; }
+step s2_update { update tlu_job set name = 'b' where id = 1; }
+step s2_delete { delete from tlu_job where id = 1; }
+step s2_rollback { rollback; }
+
+session s3
+setup { begin; }
+step s3_keyshare { select id from tlu_job where id = 1 for key share; }
+step s3_share { select id from tlu_job where id = 1 for share; }
+step s3_for_update { select id from tlu_job where id = 1 for update; }
+step s3_update { update tlu_job set name = 'c' where id = 1; }
+step s3_delete { delete from tlu_job where id = 1; }
+step s3_rollback { rollback; }
+step s3_commit { commit; }
+
+# test that s2 will not deadlock with s3 when s1 is rolled back
+permutation s1_share s2_for_update s3_share s3_for_update s1_rollback s3_rollback s2_rollback
+# test that update does not cause deadlocks if it can proceed
+permutation s1_keyshare s2_for_update s3_keyshare s1_update s3_update s1_rollback s3_rollback s2_rollback
+permutation s1_keyshare s2_for_update s3_keyshare s1_update s3_update s1_commit s3_rollback s2_rollback
+# test that delete does not cause deadlocks if it can proceed
+permutation s1_keyshare s2_for_update s3_keyshare s3_delete s1_rollback s3_rollback s2_rollback
+permutation s1_keyshare s2_for_update s3_keyshare s3_delete s1_rollback s3_commit s2_rollback
+# test that sessions that don't upgrade locks acquire them in order
+permutation s1_share s2_for_update s3_for_update s1_rollback s2_rollback s3_rollback
+permutation s1_share s2_update s3_update s1_rollback s2_rollback s3_rollback
+permutation s1_share s2_delete s3_delete s1_rollback s2_rollback s3_rollback
+# test s2 retrying the overall tuple lock algorithm after initially avoiding deadlock
+permutation s1_keyshare s3_for_update s2_for_keyshare s1_savept_e s1_share s1_savept_f s1_fornokeyupd s2_fornokeyupd s0_begin s0_keyshare s1_rollback_f s0_keyshare s1_rollback_e s1_rollback s2_rollback s0_rollback s3_rollback
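
The simplest shape of the property being tested: a session upgrades its own tuple lock while a stronger request is already queued behind it, and that upgrade must not be reported as a deadlock. Sketched with the tlu_job table:

    -- session 1
    begin;
    select id from tlu_job where id = 1 for key share;

    -- session 2 (blocks: FOR UPDATE conflicts with KEY SHARE)
    select id from tlu_job where id = 1 for update;

    -- session 1: upgrade the existing lock by updating the row; this completes
    -- without a deadlock error, and session 2 proceeds once we commit
    update tlu_job set name = 'b' where id = 1;
    commit;
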
diff --git a/src/test/isolation/specs/two-ids.spec b/src/test/isolation/specs/two-ids.spec
new file mode 100644
index 0000000..fc0289f
--- /dev/null
+++ b/src/test/isolation/specs/two-ids.spec
@@ -0,0 +1,40 @@
+# Two IDs test
+#
+# Small, simple test showing read-only anomalies.
+#
+# There are only four permutations which must cause a serialization failure.
+# Required failure cases are where s2 overlaps both s1 and s3, but s1
+# commits before s3 executes its first SELECT.
+#
+# If s3 were declared READ ONLY there would be no false positives.
+# With s3 defaulting to READ WRITE, we currently expect 12 false
+# positives. Further work dealing with de facto READ ONLY transactions
+# may be able to reduce or eliminate those false positives.
+
+setup
+{
+ create table D1 (id int not null);
+ create table D2 (id int not null);
+ insert into D1 values (1);
+ insert into D2 values (1);
+}
+
+teardown
+{
+ DROP TABLE D1, D2;
+}
+
+session s1
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step wx1 { update D1 set id = id + 1; }
+step c1 { COMMIT; }
+
+session s2
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step rxwy2 { update D2 set id = (select id+1 from D1); }
+step c2 { COMMIT; }
+
+session s3
+setup { BEGIN ISOLATION LEVEL SERIALIZABLE; }
+step ry3 { select id from D2; }
+step c3 { COMMIT; }
diff --git a/src/test/isolation/specs/update-conflict-out.spec b/src/test/isolation/specs/update-conflict-out.spec
new file mode 100644
index 0000000..8aad6aa
--- /dev/null
+++ b/src/test/isolation/specs/update-conflict-out.spec
@@ -0,0 +1,54 @@
+# Test for interactions between SSI's "conflict out" handling for heapam and
+# a concurrently updated tuple
+#
+# See bug report:
+# https://postgr.es/m/db7b729d-0226-d162-a126-8a8ab2dc4443%40jepsen.io
+
+setup
+{
+ CREATE TABLE txn0(id int4 PRIMARY KEY, val TEXT);
+ CREATE TABLE txn1(id int4 PRIMARY KEY, val TEXT);
+}
+
+teardown
+{
+ DROP TABLE txn0;
+ DROP TABLE txn1;
+}
+
+session foo
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step foo_select { SELECT * FROM txn0 WHERE id = 42; }
+step foo_insert { INSERT INTO txn1 SELECT 7, 'foo_insert'; }
+step foo_commit { COMMIT; }
+
+session bar
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step bar_select { SELECT * FROM txn1 WHERE id = 7; }
+step bar_insert { INSERT INTO txn0 SELECT 42, 'bar_insert'; }
+step bar_commit { COMMIT; }
+
+# This session creates the conditions that confused bar's "conflict out"
+# handling in old releases affected by the bug:
+session trouble
+setup { BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE; }
+step trouble_update { UPDATE txn1 SET val = 'add physical version for "bar_select"' WHERE id = 7; }
+step trouble_delete { DELETE FROM txn1 WHERE id = 7; }
+step trouble_abort { ABORT; }
+
+permutation foo_select
+ bar_insert
+ foo_insert foo_commit
+ trouble_update # Updates tuple...
+ bar_select # Should observe one distinct XID per version
+ bar_commit # "bar" should fail here at the latest
+ trouble_abort
+
+# Same as above, but "trouble" session DELETEs this time around
+permutation foo_select
+ bar_insert
+ foo_insert foo_commit
+ trouble_delete # Deletes tuple...
+ bar_select # Should observe foo's XID
+ bar_commit # "bar" should fail here at the latest
+ trouble_abort
diff --git a/src/test/isolation/specs/update-locked-tuple.spec b/src/test/isolation/specs/update-locked-tuple.spec
new file mode 100644
index 0000000..0dad792
--- /dev/null
+++ b/src/test/isolation/specs/update-locked-tuple.spec
@@ -0,0 +1,38 @@
+# Test updating a locked tuple.  When the lock doesn't conflict with the
+# update, neither blocking nor serializability problems should occur.
+
+setup
+{
+ DROP TABLE IF EXISTS users, orders;
+ CREATE TABLE users (id INTEGER PRIMARY KEY,
+ name varchar,
+ sometime timestamp);
+ CREATE TABLE orders (id INTEGER PRIMARY KEY,
+ name varchar,
+ user_id INTEGER REFERENCES users (id));
+ INSERT INTO users (id, name) VALUES (1, 'olivier');
+ INSERT INTO orders (id, name) VALUES (1, 'order of olivier (1)');
+}
+
+teardown
+{
+ DROP TABLE users, orders;
+}
+
+session s1
+step s1b { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s1u1 { UPDATE orders SET name = 'order of olivier (2)', user_id = 1 WHERE id = 1; }
+step s1u2 { UPDATE orders SET name = 'order of olivier (3)', user_id = 1 WHERE id = 1; }
+step s1c { COMMIT; }
+
+session s2
+step s2b { BEGIN ISOLATION LEVEL REPEATABLE READ; }
+step s2u { UPDATE users SET sometime = '1830-10-04' WHERE id = 1; }
+step s2c { COMMIT; }
+
+permutation s1b s2b s2u s2c s1u1 s1u2 s1c
+permutation s1b s2b s2u s1u1 s2c s1u2 s1c
+permutation s1b s2b s1u1 s2u s2c s1u2 s1c
+permutation s1b s1u1 s2b s2u s2c s1u2 s1c
+permutation s1b s1u1 s2b s1u2 s2u s2c s1c
+permutation s1b s1u1 s1u2 s2b s2u s2c s1c
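
The compatibility being relied on is the one between FOR KEY SHARE (the lock strength a foreign-key check takes on the referenced row) and the FOR NO KEY UPDATE lock that a non-key UPDATE takes; a reduced sketch against the users table:

    -- session 1: key-share lock the referenced row
    BEGIN;
    SELECT id FROM users WHERE id = 1 FOR KEY SHARE;

    -- session 2: updating only a non-key column takes FOR NO KEY UPDATE, which
    -- does not conflict with KEY SHARE, so this neither blocks nor errors
    UPDATE users SET sometime = '1830-10-04' WHERE id = 1;

    -- session 1
    COMMIT;
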
diff --git a/src/test/isolation/specs/vacuum-concurrent-drop.spec b/src/test/isolation/specs/vacuum-concurrent-drop.spec
new file mode 100644
index 0000000..148a2d5
--- /dev/null
+++ b/src/test/isolation/specs/vacuum-concurrent-drop.spec
@@ -0,0 +1,45 @@
+# Test for log messages emitted by VACUUM and ANALYZE when a specified
+# relation is concurrently dropped.
+#
+# This also verifies that log messages are not emitted for concurrently
+# dropped relations that were not specified in the VACUUM or ANALYZE
+# command.
+
+setup
+{
+ CREATE TABLE parted (a INT) PARTITION BY LIST (a);
+ CREATE TABLE part1 PARTITION OF parted FOR VALUES IN (1);
+ CREATE TABLE part2 PARTITION OF parted FOR VALUES IN (2);
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS parted;
+}
+
+session s1
+step lock
+{
+ BEGIN;
+ LOCK part1 IN SHARE MODE;
+}
+step drop_and_commit
+{
+ DROP TABLE part2;
+ COMMIT;
+}
+
+session s2
+step vac_specified { VACUUM part1, part2; }
+step vac_all_parts { VACUUM parted; }
+step analyze_specified { ANALYZE part1, part2; }
+step analyze_all_parts { ANALYZE parted; }
+step vac_analyze_specified { VACUUM ANALYZE part1, part2; }
+step vac_analyze_all_parts { VACUUM ANALYZE parted; }
+
+permutation lock vac_specified drop_and_commit
+permutation lock vac_all_parts drop_and_commit
+permutation lock analyze_specified drop_and_commit
+permutation lock analyze_all_parts drop_and_commit
+permutation lock vac_analyze_specified drop_and_commit
+permutation lock vac_analyze_all_parts drop_and_commit
diff --git a/src/test/isolation/specs/vacuum-conflict.spec b/src/test/isolation/specs/vacuum-conflict.spec
new file mode 100644
index 0000000..3cb8926
--- /dev/null
+++ b/src/test/isolation/specs/vacuum-conflict.spec
@@ -0,0 +1,51 @@
+# Tests for locking conflicts with VACUUM and ANALYZE commands.
+
+setup
+{
+ CREATE ROLE regress_vacuum_conflict;
+ CREATE TABLE vacuum_tab (a int);
+}
+
+teardown
+{
+ DROP TABLE vacuum_tab;
+ DROP ROLE regress_vacuum_conflict;
+}
+
+session s1
+step s1_begin { BEGIN; }
+step s1_lock { LOCK vacuum_tab IN SHARE UPDATE EXCLUSIVE MODE; }
+step s1_commit { COMMIT; }
+
+session s2
+step s2_grant { ALTER TABLE vacuum_tab OWNER TO regress_vacuum_conflict; }
+step s2_auth { SET ROLE regress_vacuum_conflict; }
+step s2_vacuum { VACUUM vacuum_tab; }
+step s2_analyze { ANALYZE vacuum_tab; }
+step s2_reset { RESET ROLE; }
+
+# The role doesn't have privileges to vacuum the table, so VACUUM should
+# immediately skip the table without waiting for a lock.
+permutation s1_begin s1_lock s2_auth s2_vacuum s1_commit s2_reset
+permutation s1_begin s2_auth s2_vacuum s1_lock s1_commit s2_reset
+permutation s1_begin s2_auth s1_lock s2_vacuum s1_commit s2_reset
+permutation s2_auth s2_vacuum s1_begin s1_lock s1_commit s2_reset
+
+# Same as previously for ANALYZE
+permutation s1_begin s1_lock s2_auth s2_analyze s1_commit s2_reset
+permutation s1_begin s2_auth s2_analyze s1_lock s1_commit s2_reset
+permutation s1_begin s2_auth s1_lock s2_analyze s1_commit s2_reset
+permutation s2_auth s2_analyze s1_begin s1_lock s1_commit s2_reset
+
+# The role has privileges to vacuum the table, so VACUUM will block if
+# another session holds a lock on the table, and will succeed in all cases.
+permutation s1_begin s2_grant s1_lock s2_auth s2_vacuum s1_commit s2_reset
+permutation s1_begin s2_grant s2_auth s2_vacuum s1_lock s1_commit s2_reset
+permutation s1_begin s2_grant s2_auth s1_lock s2_vacuum s1_commit s2_reset
+permutation s2_grant s2_auth s2_vacuum s1_begin s1_lock s1_commit s2_reset
+
+# Same as previously for ANALYZE
+permutation s1_begin s2_grant s1_lock s2_auth s2_analyze s1_commit s2_reset
+permutation s1_begin s2_grant s2_auth s2_analyze s1_lock s1_commit s2_reset
+permutation s1_begin s2_grant s2_auth s1_lock s2_analyze s1_commit s2_reset
+permutation s2_grant s2_auth s2_analyze s1_begin s1_lock s1_commit s2_reset
diff --git a/src/test/isolation/specs/vacuum-no-cleanup-lock.spec b/src/test/isolation/specs/vacuum-no-cleanup-lock.spec
new file mode 100644
index 0000000..05fd280
--- /dev/null
+++ b/src/test/isolation/specs/vacuum-no-cleanup-lock.spec
@@ -0,0 +1,150 @@
+# Test for vacuum's reduced processing of heap pages (used for any heap page
+# where a cleanup lock isn't immediately available)
+#
+# Debugging tip: Change VACUUM to VACUUM VERBOSE to get feedback on what's
+# really going on
+
+# Use name type here to avoid TOAST table:
+setup
+{
+ CREATE TABLE smalltbl AS SELECT i AS id, 't'::name AS t FROM generate_series(1,20) i;
+ ALTER TABLE smalltbl SET (autovacuum_enabled = off);
+ ALTER TABLE smalltbl ADD PRIMARY KEY (id);
+}
+setup
+{
+ VACUUM ANALYZE smalltbl;
+}
+
+teardown
+{
+ DROP TABLE smalltbl;
+}
+
+# This session holds a pin on smalltbl's only heap page:
+session pinholder
+step pinholder_cursor
+{
+ BEGIN;
+ DECLARE c1 CURSOR FOR SELECT 1 AS dummy FROM smalltbl;
+ FETCH NEXT FROM c1;
+}
+step pinholder_commit
+{
+ COMMIT;
+}
+
+# This session inserts and deletes tuples, potentially affecting reltuples:
+session dml
+step dml_insert
+{
+ INSERT INTO smalltbl SELECT max(id) + 1 FROM smalltbl;
+}
+step dml_delete
+{
+ DELETE FROM smalltbl WHERE id = (SELECT min(id) FROM smalltbl);
+}
+step dml_begin { BEGIN; }
+step dml_key_share { SELECT id FROM smalltbl WHERE id = 3 FOR KEY SHARE; }
+step dml_commit { COMMIT; }
+
+# Needed for Multixact test:
+session dml_other
+step dml_other_begin { BEGIN; }
+step dml_other_key_share { SELECT id FROM smalltbl WHERE id = 3 FOR KEY SHARE; }
+step dml_other_update { UPDATE smalltbl SET t = 'u' WHERE id = 3; }
+step dml_other_commit { COMMIT; }
+
+# This session runs non-aggressive VACUUM, but with maximally aggressive
+# cutoffs for tuple freezing (e.g., FreezeLimit == OldestXmin):
+session vacuumer
+setup
+{
+ SET vacuum_freeze_min_age = 0;
+ SET vacuum_multixact_freeze_min_age = 0;
+}
+step vacuumer_nonaggressive_vacuum
+{
+ VACUUM smalltbl;
+}
+step vacuumer_pg_class_stats
+{
+ SELECT relpages, reltuples FROM pg_class WHERE oid = 'smalltbl'::regclass;
+}
+
+# Test VACUUM's reltuples counting mechanism.
+#
+# Final pg_class.reltuples should never be affected by VACUUM's inability to
+# get a cleanup lock on any page, except to the extent that any cleanup lock
+# contention changes the number of tuples that remain ("missed dead" tuples
+# are counted in reltuples, much like "recently dead" tuples).
+
+# Easy case:
+permutation
+ vacuumer_pg_class_stats # Start with 20 tuples
+ dml_insert
+ vacuumer_nonaggressive_vacuum
+ vacuumer_pg_class_stats # End with 21 tuples
+
+# Harder case -- count 21 tuples at the end (like last time), but with cleanup
+# lock contention this time:
+permutation
+ vacuumer_pg_class_stats # Start with 20 tuples
+ dml_insert
+ pinholder_cursor
+ vacuumer_nonaggressive_vacuum
+ vacuumer_pg_class_stats # End with 21 tuples
+ pinholder_commit # order doesn't matter
+
+# Same as "harder case", but vary the order, and delete an inserted row:
+permutation
+ vacuumer_pg_class_stats # Start with 20 tuples
+ pinholder_cursor
+ dml_insert
+ dml_delete
+ dml_insert
+ vacuumer_nonaggressive_vacuum
+ # reltuples is 21 here again -- "recently dead" tuple won't be included in
+ # count here:
+ vacuumer_pg_class_stats
+ pinholder_commit # order doesn't matter
+
+# Same as "harder case", but initial insert and delete before cursor:
+permutation
+ vacuumer_pg_class_stats # Start with 20 tuples
+ dml_insert
+ dml_delete
+ pinholder_cursor
+ dml_insert
+ vacuumer_nonaggressive_vacuum
+ # reltuples is 21 here again -- "missed dead" tuple ("recently dead" when
+ # concurrent activity held back VACUUM's OldestXmin) won't be included in
+ # count here:
+ vacuumer_pg_class_stats
+ pinholder_commit # order doesn't matter
+
+# Test VACUUM's mechanism for skipping MultiXact freezing.
+#
+# This provides test coverage for code paths that are only hit when we need to
+# freeze, but inability to acquire a cleanup lock on a heap page makes
+# freezing some XIDs/MXIDs < FreezeLimit/MultiXactCutoff impossible (without
+# waiting for a cleanup lock, which non-aggressive VACUUM is unwilling to do).
+permutation
+ dml_begin
+ dml_other_begin
+ dml_key_share
+ dml_other_key_share
+ # Will get cleanup lock, can't advance relminmxid yet:
+ # (though will usually advance relfrozenxid by ~2 XIDs)
+ vacuumer_nonaggressive_vacuum
+ pinholder_cursor
+ dml_other_update
+ dml_commit
+ dml_other_commit
+ # Can't cleanup lock, so still can't advance relminmxid here:
+ # (relfrozenxid held back by XIDs in MultiXact too)
+ vacuumer_nonaggressive_vacuum
+ pinholder_commit
+ # Pin was dropped, so will advance relminmxid, at long last:
+ # (ditto for relfrozenxid advancement)
+ vacuumer_nonaggressive_vacuum
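
The vacuumer's setup makes every XID and MultiXactId immediately eligible for freezing, so the only interesting variable is whether the cleanup lock could be obtained; the relfrozenxid/relminmxid advancement described in the comments can be watched like this:

    SET vacuum_freeze_min_age = 0;
    SET vacuum_multixact_freeze_min_age = 0;
    VACUUM (VERBOSE) smalltbl;
    SELECT relpages, reltuples,
           age(relfrozenxid)    AS xid_age,
           mxid_age(relminmxid) AS mxid_age
    FROM pg_class
    WHERE oid = 'smalltbl'::regclass;
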
diff --git a/src/test/isolation/specs/vacuum-skip-locked.spec b/src/test/isolation/specs/vacuum-skip-locked.spec
new file mode 100644
index 0000000..3fad6e1
--- /dev/null
+++ b/src/test/isolation/specs/vacuum-skip-locked.spec
@@ -0,0 +1,61 @@
+# Test for SKIP_LOCKED option of VACUUM and ANALYZE commands.
+#
+# This also verifies that log messages are not emitted for skipped relations
+# that were not specified in the VACUUM or ANALYZE command.
+
+setup
+{
+ CREATE TABLE parted (a INT) PARTITION BY LIST (a);
+ CREATE TABLE part1 PARTITION OF parted FOR VALUES IN (1);
+ ALTER TABLE part1 SET (autovacuum_enabled = false);
+ CREATE TABLE part2 PARTITION OF parted FOR VALUES IN (2);
+ ALTER TABLE part2 SET (autovacuum_enabled = false);
+}
+
+teardown
+{
+ DROP TABLE IF EXISTS parted;
+}
+
+session s1
+step lock_share
+{
+ BEGIN;
+ LOCK part1 IN SHARE MODE;
+}
+step lock_access_exclusive
+{
+ BEGIN;
+ LOCK part1 IN ACCESS EXCLUSIVE MODE;
+}
+step commit
+{
+ COMMIT;
+}
+
+session s2
+step vac_specified { VACUUM (SKIP_LOCKED) part1, part2; }
+step vac_all_parts { VACUUM (SKIP_LOCKED) parted; }
+step analyze_specified { ANALYZE (SKIP_LOCKED) part1, part2; }
+step analyze_all_parts { ANALYZE (SKIP_LOCKED) parted; }
+step vac_analyze_specified { VACUUM (ANALYZE, SKIP_LOCKED) part1, part2; }
+step vac_analyze_all_parts { VACUUM (ANALYZE, SKIP_LOCKED) parted; }
+step vac_full_specified { VACUUM (SKIP_LOCKED, FULL) part1, part2; }
+step vac_full_all_parts { VACUUM (SKIP_LOCKED, FULL) parted; }
+
+permutation lock_share vac_specified commit
+permutation lock_share vac_all_parts commit
+permutation lock_share analyze_specified commit
+permutation lock_share analyze_all_parts commit
+permutation lock_share vac_analyze_specified commit
+permutation lock_share vac_analyze_all_parts commit
+permutation lock_share vac_full_specified commit
+permutation lock_share vac_full_all_parts commit
+permutation lock_access_exclusive vac_specified commit
+permutation lock_access_exclusive vac_all_parts commit
+permutation lock_access_exclusive analyze_specified commit
+permutation lock_access_exclusive analyze_all_parts commit
+permutation lock_access_exclusive vac_analyze_specified commit
+permutation lock_access_exclusive vac_analyze_all_parts commit
+permutation lock_access_exclusive vac_full_specified commit
+permutation lock_access_exclusive vac_full_all_parts commit
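
Outside the isolation harness the same options behave as the permutations suggest: with another session holding a conflicting lock on part1, a SKIP_LOCKED run skips that partition (reporting it) instead of waiting. For example:

    -- session 1
    BEGIN;
    LOCK part1 IN SHARE MODE;

    -- session 2: skips the locked partition rather than blocking on it
    VACUUM (SKIP_LOCKED, VERBOSE) part1, part2;
    ANALYZE (SKIP_LOCKED) parted;

    -- session 1
    COMMIT;
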