Diffstat (limited to 'src/test/regress/input')
-rw-r--r--  src/test/regress/input/constraints.source         554
-rw-r--r--  src/test/regress/input/copy.source                 203
-rw-r--r--  src/test/regress/input/create_function_1.source    105
-rw-r--r--  src/test/regress/input/create_function_2.source     88
-rw-r--r--  src/test/regress/input/largeobject.source          263
-rw-r--r--  src/test/regress/input/misc.source                 262
-rw-r--r--  src/test/regress/input/tablespace.source           288
7 files changed, 1763 insertions, 0 deletions
diff --git a/src/test/regress/input/constraints.source b/src/test/regress/input/constraints.source
new file mode 100644
index 0000000..c325b27
--- /dev/null
+++ b/src/test/regress/input/constraints.source
@@ -0,0 +1,554 @@
+--
+-- CONSTRAINTS
+-- Constraints can be specified with:
+-- - DEFAULT clause
+-- - CHECK clauses
+-- - PRIMARY KEY clauses
+-- - UNIQUE clauses
+-- - EXCLUDE clauses
+--
+
+--
+-- DEFAULT syntax
+--
+
+CREATE TABLE DEFAULT_TBL (i int DEFAULT 100,
+ x text DEFAULT 'vadim', f float8 DEFAULT 123.456);
+
+INSERT INTO DEFAULT_TBL VALUES (1, 'thomas', 57.0613);
+INSERT INTO DEFAULT_TBL VALUES (1, 'bruce');
+INSERT INTO DEFAULT_TBL (i, f) VALUES (2, 987.654);
+INSERT INTO DEFAULT_TBL (x) VALUES ('marc');
+INSERT INTO DEFAULT_TBL VALUES (3, null, 1.0);
+
+SELECT '' AS five, * FROM DEFAULT_TBL;
+
+CREATE SEQUENCE DEFAULT_SEQ;
+
+CREATE TABLE DEFAULTEXPR_TBL (i1 int DEFAULT 100 + (200-199) * 2,
+ i2 int DEFAULT nextval('default_seq'));
+
+INSERT INTO DEFAULTEXPR_TBL VALUES (-1, -2);
+INSERT INTO DEFAULTEXPR_TBL (i1) VALUES (-3);
+INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (-4);
+INSERT INTO DEFAULTEXPR_TBL (i2) VALUES (NULL);
+
+SELECT '' AS four, * FROM DEFAULTEXPR_TBL;
+
+-- syntax errors
+-- test for extraneous comma
+CREATE TABLE error_tbl (i int DEFAULT (100, ));
+-- this will fail because gram.y uses b_expr not a_expr for defaults,
+-- to avoid a shift/reduce conflict that arises from NOT NULL being
+-- part of the column definition syntax:
+CREATE TABLE error_tbl (b1 bool DEFAULT 1 IN (1, 2));
+-- this should work, however:
+CREATE TABLE error_tbl (b1 bool DEFAULT (1 IN (1, 2)));
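+-- (presumably because a fully parenthesized expression is accepted wherever a
+-- b_expr is expected, which makes the IN construct legal inside the DEFAULT)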
+
+DROP TABLE error_tbl;
+
+--
+-- CHECK syntax
+--
+
+CREATE TABLE CHECK_TBL (x int,
+ CONSTRAINT CHECK_CON CHECK (x > 3));
+
+INSERT INTO CHECK_TBL VALUES (5);
+INSERT INTO CHECK_TBL VALUES (4);
+INSERT INTO CHECK_TBL VALUES (3);
+INSERT INTO CHECK_TBL VALUES (2);
+INSERT INTO CHECK_TBL VALUES (6);
+INSERT INTO CHECK_TBL VALUES (1);
+
+SELECT '' AS three, * FROM CHECK_TBL;
+
+CREATE SEQUENCE CHECK_SEQ;
+
+CREATE TABLE CHECK2_TBL (x int, y text, z int,
+ CONSTRAINT SEQUENCE_CON
+ CHECK (x > 3 and y <> 'check failed' and z < 8));
+
+INSERT INTO CHECK2_TBL VALUES (4, 'check ok', -2);
+INSERT INTO CHECK2_TBL VALUES (1, 'x check failed', -2);
+INSERT INTO CHECK2_TBL VALUES (5, 'z check failed', 10);
+INSERT INTO CHECK2_TBL VALUES (0, 'check failed', -2);
+INSERT INTO CHECK2_TBL VALUES (6, 'check failed', 11);
+INSERT INTO CHECK2_TBL VALUES (7, 'check ok', 7);
+
+SELECT '' AS two, * from CHECK2_TBL;
+
+--
+-- Check constraints on INSERT
+--
+
+CREATE SEQUENCE INSERT_SEQ;
+
+CREATE TABLE INSERT_TBL (x INT DEFAULT nextval('insert_seq'),
+ y TEXT DEFAULT '-NULL-',
+ z INT DEFAULT -1 * currval('insert_seq'),
+ CONSTRAINT INSERT_TBL_CON CHECK (x >= 3 AND y <> 'check failed' AND x < 8),
+ CHECK (x + z = 0));
+
+INSERT INTO INSERT_TBL(x,z) VALUES (2, -2);
+
+SELECT '' AS zero, * FROM INSERT_TBL;
+
+SELECT 'one' AS one, nextval('insert_seq');
+
+INSERT INTO INSERT_TBL(y) VALUES ('Y');
+INSERT INTO INSERT_TBL(y) VALUES ('Y');
+INSERT INTO INSERT_TBL(x,z) VALUES (1, -2);
+INSERT INTO INSERT_TBL(z,x) VALUES (-7, 7);
+INSERT INTO INSERT_TBL VALUES (5, 'check failed', -5);
+INSERT INTO INSERT_TBL VALUES (7, '!check failed', -7);
+INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-');
+
+SELECT '' AS four, * FROM INSERT_TBL;
+
+INSERT INTO INSERT_TBL(y,z) VALUES ('check failed', 4);
+INSERT INTO INSERT_TBL(x,y) VALUES (5, 'check failed');
+INSERT INTO INSERT_TBL(x,y) VALUES (5, '!check failed');
+INSERT INTO INSERT_TBL(y) VALUES ('-!NULL-');
+
+SELECT '' AS six, * FROM INSERT_TBL;
+
+SELECT 'seven' AS one, nextval('insert_seq');
+
+INSERT INTO INSERT_TBL(y) VALUES ('Y');
+
+SELECT 'eight' AS one, currval('insert_seq');
+
+-- According to SQL, it is OK to insert a record that gives rise to NULL
+-- constraint-condition results. Postgres used to reject this, but it
+-- was wrong:
+INSERT INTO INSERT_TBL VALUES (null, null, null);
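+-- (all three columns are NULL, so each CHECK condition evaluates to NULL
+-- rather than FALSE, and only a FALSE result rejects a row)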
+
+SELECT '' AS nine, * FROM INSERT_TBL;
+
+--
+-- Check constraints on system columns
+--
+
+CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool,
+ altitude int,
+ CHECK (NOT (is_capital AND tableoid::regclass::text = 'sys_col_check_tbl')));
+
+INSERT INTO SYS_COL_CHECK_TBL VALUES ('Seattle', 'Washington', false, 100);
+INSERT INTO SYS_COL_CHECK_TBL VALUES ('Olympia', 'Washington', true, 100);
+
+SELECT *, tableoid::regclass::text FROM SYS_COL_CHECK_TBL;
+
+DROP TABLE SYS_COL_CHECK_TBL;
+
+--
+-- Check constraints on system columns other than tableoid should return error
+--
+CREATE TABLE SYS_COL_CHECK_TBL (city text, state text, is_capital bool,
+ altitude int,
+ CHECK (NOT (is_capital AND ctid::text = 'sys_col_check_tbl')));
+
+--
+-- Check inheritance of defaults and constraints
+--
+
+CREATE TABLE INSERT_CHILD (cx INT default 42,
+ cy INT CHECK (cy > x))
+ INHERITS (INSERT_TBL);
+
+INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,11);
+INSERT INTO INSERT_CHILD(x,z,cy) VALUES (7,-7,6);
+INSERT INTO INSERT_CHILD(x,z,cy) VALUES (6,-7,7);
+INSERT INTO INSERT_CHILD(x,y,z,cy) VALUES (6,'check failed',-6,7);
+
+SELECT * FROM INSERT_CHILD;
+
+DROP TABLE INSERT_CHILD;
+
+--
+-- Check NO INHERIT constraints and how they interact with inheritance
+--
+
+CREATE TABLE ATACC1 (TEST INT
+ CHECK (TEST > 0) NO INHERIT);
+
+CREATE TABLE ATACC2 (TEST2 INT) INHERITS (ATACC1);
+-- check constraint is not there on child
+INSERT INTO ATACC2 (TEST) VALUES (-3);
+-- check constraint is there on parent
+INSERT INTO ATACC1 (TEST) VALUES (-3);
+DROP TABLE ATACC1 CASCADE;
+
+CREATE TABLE ATACC1 (TEST INT, TEST2 INT
+ CHECK (TEST > 0), CHECK (TEST2 > 10) NO INHERIT);
+
+CREATE TABLE ATACC2 () INHERITS (ATACC1);
+-- check constraint is there on child
+INSERT INTO ATACC2 (TEST) VALUES (-3);
+-- check constraint is there on parent
+INSERT INTO ATACC1 (TEST) VALUES (-3);
+-- check constraint is not there on child
+INSERT INTO ATACC2 (TEST2) VALUES (3);
+-- check constraint is there on parent
+INSERT INTO ATACC1 (TEST2) VALUES (3);
+DROP TABLE ATACC1 CASCADE;
+
+--
+-- Check constraints on INSERT INTO
+--
+
+DELETE FROM INSERT_TBL;
+
+ALTER SEQUENCE INSERT_SEQ RESTART WITH 4;
+
+CREATE TEMP TABLE tmp (xd INT, yd TEXT, zd INT);
+
+INSERT INTO tmp VALUES (null, 'Y', null);
+INSERT INTO tmp VALUES (5, '!check failed', null);
+INSERT INTO tmp VALUES (null, 'try again', null);
+INSERT INTO INSERT_TBL(y) select yd from tmp;
+
+SELECT '' AS three, * FROM INSERT_TBL;
+
+INSERT INTO INSERT_TBL SELECT * FROM tmp WHERE yd = 'try again';
+INSERT INTO INSERT_TBL(y,z) SELECT yd, -7 FROM tmp WHERE yd = 'try again';
+INSERT INTO INSERT_TBL(y,z) SELECT yd, -8 FROM tmp WHERE yd = 'try again';
+
+SELECT '' AS four, * FROM INSERT_TBL;
+
+DROP TABLE tmp;
+
+--
+-- Check constraints on UPDATE
+--
+
+UPDATE INSERT_TBL SET x = NULL WHERE x = 5;
+UPDATE INSERT_TBL SET x = 6 WHERE x = 6;
+UPDATE INSERT_TBL SET x = -z, z = -x;
+UPDATE INSERT_TBL SET x = z, z = x;
+
+SELECT * FROM INSERT_TBL;
+
+-- DROP TABLE INSERT_TBL;
+
+--
+-- Check constraints on COPY FROM
+--
+
+CREATE TABLE COPY_TBL (x INT, y TEXT, z INT,
+ CONSTRAINT COPY_CON
+ CHECK (x > 3 AND y <> 'check failed' AND x < 7 ));
+
+COPY COPY_TBL FROM '@abs_srcdir@/data/constro.data';
+
+SELECT '' AS two, * FROM COPY_TBL;
+
+COPY COPY_TBL FROM '@abs_srcdir@/data/constrf.data';
+
+SELECT * FROM COPY_TBL;
+
+--
+-- Primary keys
+--
+
+CREATE TABLE PRIMARY_TBL (i int PRIMARY KEY, t text);
+
+INSERT INTO PRIMARY_TBL VALUES (1, 'one');
+INSERT INTO PRIMARY_TBL VALUES (2, 'two');
+INSERT INTO PRIMARY_TBL VALUES (1, 'three');
+INSERT INTO PRIMARY_TBL VALUES (4, 'three');
+INSERT INTO PRIMARY_TBL VALUES (5, 'one');
+INSERT INTO PRIMARY_TBL (t) VALUES ('six');
+
+SELECT '' AS four, * FROM PRIMARY_TBL;
+
+DROP TABLE PRIMARY_TBL;
+
+CREATE TABLE PRIMARY_TBL (i int, t text,
+ PRIMARY KEY(i,t));
+
+INSERT INTO PRIMARY_TBL VALUES (1, 'one');
+INSERT INTO PRIMARY_TBL VALUES (2, 'two');
+INSERT INTO PRIMARY_TBL VALUES (1, 'three');
+INSERT INTO PRIMARY_TBL VALUES (4, 'three');
+INSERT INTO PRIMARY_TBL VALUES (5, 'one');
+INSERT INTO PRIMARY_TBL (t) VALUES ('six');
+
+SELECT '' AS three, * FROM PRIMARY_TBL;
+
+DROP TABLE PRIMARY_TBL;
+
+--
+-- Unique keys
+--
+
+CREATE TABLE UNIQUE_TBL (i int UNIQUE, t text);
+
+INSERT INTO UNIQUE_TBL VALUES (1, 'one');
+INSERT INTO UNIQUE_TBL VALUES (2, 'two');
+INSERT INTO UNIQUE_TBL VALUES (1, 'three');
+INSERT INTO UNIQUE_TBL VALUES (4, 'four');
+INSERT INTO UNIQUE_TBL VALUES (5, 'one');
+INSERT INTO UNIQUE_TBL (t) VALUES ('six');
+INSERT INTO UNIQUE_TBL (t) VALUES ('seven');
+
+INSERT INTO UNIQUE_TBL VALUES (5, 'five-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'five-upsert-update';
+INSERT INTO UNIQUE_TBL VALUES (6, 'six-upsert-insert') ON CONFLICT (i) DO UPDATE SET t = 'six-upsert-update';
+-- should fail
+INSERT INTO UNIQUE_TBL VALUES (1, 'a'), (2, 'b'), (2, 'b') ON CONFLICT (i) DO UPDATE SET t = 'fails';
+
+SELECT '' AS five, * FROM UNIQUE_TBL;
+
+DROP TABLE UNIQUE_TBL;
+
+CREATE TABLE UNIQUE_TBL (i int, t text,
+ UNIQUE(i,t));
+
+INSERT INTO UNIQUE_TBL VALUES (1, 'one');
+INSERT INTO UNIQUE_TBL VALUES (2, 'two');
+INSERT INTO UNIQUE_TBL VALUES (1, 'three');
+INSERT INTO UNIQUE_TBL VALUES (1, 'one');
+INSERT INTO UNIQUE_TBL VALUES (5, 'one');
+INSERT INTO UNIQUE_TBL (t) VALUES ('six');
+
+SELECT '' AS five, * FROM UNIQUE_TBL;
+
+DROP TABLE UNIQUE_TBL;
+
+--
+-- Deferrable unique constraints
+--
+
+CREATE TABLE unique_tbl (i int UNIQUE DEFERRABLE, t text);
+
+INSERT INTO unique_tbl VALUES (0, 'one');
+INSERT INTO unique_tbl VALUES (1, 'two');
+INSERT INTO unique_tbl VALUES (2, 'tree');
+INSERT INTO unique_tbl VALUES (3, 'four');
+INSERT INTO unique_tbl VALUES (4, 'five');
+
+BEGIN;
+
+-- default is immediate so this should fail right away
+UPDATE unique_tbl SET i = 1 WHERE i = 0;
+
+ROLLBACK;
+
+-- check is done at end of statement, so this should succeed
+UPDATE unique_tbl SET i = i+1;
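+-- (each row transiently collides with its neighbor's old value while the
+-- shift is in progress, but a DEFERRABLE constraint in immediate mode is
+-- checked once at end of statement, after all rows have been updated)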
+
+SELECT * FROM unique_tbl;
+
+-- explicitly defer the constraint
+BEGIN;
+
+SET CONSTRAINTS unique_tbl_i_key DEFERRED;
+
+INSERT INTO unique_tbl VALUES (3, 'three');
+DELETE FROM unique_tbl WHERE t = 'tree'; -- makes constraint valid again
+
+COMMIT; -- should succeed
+
+SELECT * FROM unique_tbl;
+
+-- try adding an initially deferred constraint
+ALTER TABLE unique_tbl DROP CONSTRAINT unique_tbl_i_key;
+ALTER TABLE unique_tbl ADD CONSTRAINT unique_tbl_i_key
+ UNIQUE (i) DEFERRABLE INITIALLY DEFERRED;
+
+BEGIN;
+
+INSERT INTO unique_tbl VALUES (1, 'five');
+INSERT INTO unique_tbl VALUES (5, 'one');
+UPDATE unique_tbl SET i = 4 WHERE i = 2;
+UPDATE unique_tbl SET i = 2 WHERE i = 4 AND t = 'four';
+DELETE FROM unique_tbl WHERE i = 1 AND t = 'one';
+DELETE FROM unique_tbl WHERE i = 5 AND t = 'five';
+
+COMMIT;
+
+SELECT * FROM unique_tbl;
+
+-- should fail at commit-time
+BEGIN;
+INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now
+COMMIT; -- should fail
+
+-- make constraint check immediate
+BEGIN;
+
+SET CONSTRAINTS ALL IMMEDIATE;
+
+INSERT INTO unique_tbl VALUES (3, 'Three'); -- should fail
+
+COMMIT;
+
+-- forced check when SET CONSTRAINTS is called
+BEGIN;
+
+SET CONSTRAINTS ALL DEFERRED;
+
+INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now
+
+SET CONSTRAINTS ALL IMMEDIATE; -- should fail
+
+COMMIT;
+
+-- test deferrable UNIQUE with a partitioned table
+CREATE TABLE parted_uniq_tbl (i int UNIQUE DEFERRABLE) partition by range (i);
+CREATE TABLE parted_uniq_tbl_1 PARTITION OF parted_uniq_tbl FOR VALUES FROM (0) TO (10);
+CREATE TABLE parted_uniq_tbl_2 PARTITION OF parted_uniq_tbl FOR VALUES FROM (20) TO (30);
+SELECT conname, conrelid::regclass FROM pg_constraint
+ WHERE conname LIKE 'parted_uniq%' ORDER BY conname;
+BEGIN;
+INSERT INTO parted_uniq_tbl VALUES (1);
+SAVEPOINT f;
+INSERT INTO parted_uniq_tbl VALUES (1); -- unique violation
+ROLLBACK TO f;
+SET CONSTRAINTS parted_uniq_tbl_i_key DEFERRED;
+INSERT INTO parted_uniq_tbl VALUES (1); -- OK now, fail at commit
+COMMIT;
+DROP TABLE parted_uniq_tbl;
+
+-- test a HOT update that invalidates the conflicting tuple.
+-- the trigger should still fire and catch the violation
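+-- (a HOT update keeps the new tuple version on the same heap page and adds no
+-- new index entry, so the uniqueness check has to reach it via the old entry)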
+
+BEGIN;
+
+INSERT INTO unique_tbl VALUES (3, 'Three'); -- should succeed for now
+UPDATE unique_tbl SET t = 'THREE' WHERE i = 3 AND t = 'Three';
+
+COMMIT; -- should fail
+
+SELECT * FROM unique_tbl;
+
+-- test a HOT update that modifies the newly inserted tuple,
+-- but should succeed because we then remove the other conflicting tuple.
+
+BEGIN;
+
+INSERT INTO unique_tbl VALUES(3, 'tree'); -- should succeed for now
+UPDATE unique_tbl SET t = 'threex' WHERE t = 'tree';
+DELETE FROM unique_tbl WHERE t = 'three';
+
+SELECT * FROM unique_tbl;
+
+COMMIT;
+
+SELECT * FROM unique_tbl;
+
+DROP TABLE unique_tbl;
+
+--
+-- EXCLUDE constraints
+--
+
+CREATE TABLE circles (
+ c1 CIRCLE,
+ c2 TEXT,
+ EXCLUDE USING gist
+ (c1 WITH &&, (c2::circle) WITH &&)
+ WHERE (circle_center(c1) <> '(0,0)')
+);
+
+-- these should succeed because they don't match the index predicate
+INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 5>');
+INSERT INTO circles VALUES('<(0,0), 5>', '<(0,0), 4>');
+
+-- succeed
+INSERT INTO circles VALUES('<(10,10), 10>', '<(0,0), 5>');
+-- fail, overlaps
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>');
+-- succeed, because violation is ignored
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>')
+ ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO NOTHING;
+-- fail, because DO UPDATE variant requires unique index
+INSERT INTO circles VALUES('<(20,20), 10>', '<(0,0), 4>')
+ ON CONFLICT ON CONSTRAINT circles_c1_c2_excl DO UPDATE SET c2 = EXCLUDED.c2;
+-- succeed because c1 doesn't overlap
+INSERT INTO circles VALUES('<(20,20), 1>', '<(0,0), 5>');
+-- succeed because c2 doesn't overlap
+INSERT INTO circles VALUES('<(20,20), 10>', '<(10,10), 5>');
+
+-- should fail on existing data without the WHERE clause
+ALTER TABLE circles ADD EXCLUDE USING gist
+ (c1 WITH &&, (c2::circle) WITH &&);
+
+-- try reindexing an existing constraint
+REINDEX INDEX circles_c1_c2_excl;
+
+DROP TABLE circles;
+
+-- Check deferred exclusion constraint
+
+CREATE TABLE deferred_excl (
+ f1 int,
+ f2 int,
+ CONSTRAINT deferred_excl_con EXCLUDE (f1 WITH =) INITIALLY DEFERRED
+);
+
+INSERT INTO deferred_excl VALUES(1);
+INSERT INTO deferred_excl VALUES(2);
+INSERT INTO deferred_excl VALUES(1); -- fail
+INSERT INTO deferred_excl VALUES(1) ON CONFLICT ON CONSTRAINT deferred_excl_con DO NOTHING; -- fail
+BEGIN;
+INSERT INTO deferred_excl VALUES(2); -- no fail here
+COMMIT; -- should fail here
+BEGIN;
+INSERT INTO deferred_excl VALUES(3);
+INSERT INTO deferred_excl VALUES(3); -- no fail here
+COMMIT; -- should fail here
+
+-- bug #13148: deferred constraint versus HOT update
+BEGIN;
+INSERT INTO deferred_excl VALUES(2, 1); -- no fail here
+DELETE FROM deferred_excl WHERE f1 = 2 AND f2 IS NULL; -- remove old row
+UPDATE deferred_excl SET f2 = 2 WHERE f1 = 2;
+COMMIT; -- should not fail
+
+SELECT * FROM deferred_excl;
+
+ALTER TABLE deferred_excl DROP CONSTRAINT deferred_excl_con;
+
+-- This should fail, but worth testing because of HOT updates
+UPDATE deferred_excl SET f1 = 3;
+
+ALTER TABLE deferred_excl ADD EXCLUDE (f1 WITH =);
+
+DROP TABLE deferred_excl;
+
+-- Comments
+-- Set up a low-level role to enforce non-superuser checks.
+CREATE ROLE regress_constraint_comments;
+SET SESSION AUTHORIZATION regress_constraint_comments;
+
+CREATE TABLE constraint_comments_tbl (a int CONSTRAINT the_constraint CHECK (a > 0));
+CREATE DOMAIN constraint_comments_dom AS int CONSTRAINT the_constraint CHECK (value > 0);
+
+COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'yes, the comment';
+COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment';
+
+-- no such constraint
+COMMENT ON CONSTRAINT no_constraint ON constraint_comments_tbl IS 'yes, the comment';
+COMMENT ON CONSTRAINT no_constraint ON DOMAIN constraint_comments_dom IS 'yes, another comment';
+
+-- no such table/domain
+COMMENT ON CONSTRAINT the_constraint ON no_comments_tbl IS 'bad comment';
+COMMENT ON CONSTRAINT the_constraint ON DOMAIN no_comments_dom IS 'another bad comment';
+
+COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS NULL;
+COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS NULL;
+
+-- unauthorized user
+RESET SESSION AUTHORIZATION;
+CREATE ROLE regress_constraint_comments_noaccess;
+SET SESSION AUTHORIZATION regress_constraint_comments_noaccess;
+COMMENT ON CONSTRAINT the_constraint ON constraint_comments_tbl IS 'no, the comment';
+COMMENT ON CONSTRAINT the_constraint ON DOMAIN constraint_comments_dom IS 'no, another comment';
+RESET SESSION AUTHORIZATION;
+
+DROP TABLE constraint_comments_tbl;
+DROP DOMAIN constraint_comments_dom;
+
+DROP ROLE regress_constraint_comments;
+DROP ROLE regress_constraint_comments_noaccess;
diff --git a/src/test/regress/input/copy.source b/src/test/regress/input/copy.source
new file mode 100644
index 0000000..a1d529a
--- /dev/null
+++ b/src/test/regress/input/copy.source
@@ -0,0 +1,203 @@
+--
+-- COPY
+--
+
+-- CLASS POPULATION
+-- (any resemblance to real life is purely coincidental)
+--
+COPY aggtest FROM '@abs_srcdir@/data/agg.data';
+
+COPY onek FROM '@abs_srcdir@/data/onek.data';
+
+COPY onek TO '@abs_builddir@/results/onek.data';
+
+DELETE FROM onek;
+
+COPY onek FROM '@abs_builddir@/results/onek.data';
+
+COPY tenk1 FROM '@abs_srcdir@/data/tenk.data';
+
+COPY slow_emp4000 FROM '@abs_srcdir@/data/rect.data';
+
+COPY person FROM '@abs_srcdir@/data/person.data';
+
+COPY emp FROM '@abs_srcdir@/data/emp.data';
+
+COPY student FROM '@abs_srcdir@/data/student.data';
+
+COPY stud_emp FROM '@abs_srcdir@/data/stud_emp.data';
+
+COPY road FROM '@abs_srcdir@/data/streets.data';
+
+COPY real_city FROM '@abs_srcdir@/data/real_city.data';
+
+COPY hash_i4_heap FROM '@abs_srcdir@/data/hash.data';
+
+COPY hash_name_heap FROM '@abs_srcdir@/data/hash.data';
+
+COPY hash_txt_heap FROM '@abs_srcdir@/data/hash.data';
+
+COPY hash_f8_heap FROM '@abs_srcdir@/data/hash.data';
+
+COPY test_tsvector FROM '@abs_srcdir@/data/tsearch.data';
+
+COPY testjsonb FROM '@abs_srcdir@/data/jsonb.data';
+
+-- the data in this file has a lot of duplicates in the index key
+-- fields, leading to long bucket chains and lots of table expansion.
+-- this is therefore a stress test of the bucket overflow code (unlike
+-- the data in hash.data, which has unique index keys).
+--
+-- COPY hash_ovfl_heap FROM '@abs_srcdir@/data/hashovfl.data';
+
+COPY bt_i4_heap FROM '@abs_srcdir@/data/desc.data';
+
+COPY bt_name_heap FROM '@abs_srcdir@/data/hash.data';
+
+COPY bt_txt_heap FROM '@abs_srcdir@/data/desc.data';
+
+COPY bt_f8_heap FROM '@abs_srcdir@/data/hash.data';
+
+COPY array_op_test FROM '@abs_srcdir@/data/array.data';
+
+COPY array_index_op_test FROM '@abs_srcdir@/data/array.data';
+
+-- analyze all the data we just loaded, to ensure plan consistency
+-- in later tests
+
+ANALYZE aggtest;
+ANALYZE onek;
+ANALYZE tenk1;
+ANALYZE slow_emp4000;
+ANALYZE person;
+ANALYZE emp;
+ANALYZE student;
+ANALYZE stud_emp;
+ANALYZE road;
+ANALYZE real_city;
+ANALYZE hash_i4_heap;
+ANALYZE hash_name_heap;
+ANALYZE hash_txt_heap;
+ANALYZE hash_f8_heap;
+ANALYZE test_tsvector;
+ANALYZE bt_i4_heap;
+ANALYZE bt_name_heap;
+ANALYZE bt_txt_heap;
+ANALYZE bt_f8_heap;
+ANALYZE array_op_test;
+ANALYZE array_index_op_test;
+
+--- test copying in CSV mode with various styles
+--- of embedded line ending characters
+
+create temp table copytest (
+ style text,
+ test text,
+ filler int);
+
+insert into copytest values('DOS',E'abc\r\ndef',1);
+insert into copytest values('Unix',E'abc\ndef',2);
+insert into copytest values('Mac',E'abc\rdef',3);
+insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4);
+
+copy copytest to '@abs_builddir@/results/copytest.csv' csv;
+
+create temp table copytest2 (like copytest);
+
+copy copytest2 from '@abs_builddir@/results/copytest.csv' csv;
+
+select * from copytest except select * from copytest2;
+
+truncate copytest2;
+
+--- same test but with an escape char different from quote char
+
+copy copytest to '@abs_builddir@/results/copytest.csv' csv quote '''' escape E'\\';
+
+copy copytest2 from '@abs_builddir@/results/copytest.csv' csv quote '''' escape E'\\';
+
+select * from copytest except select * from copytest2;
+
+
+-- test header line feature
+
+create temp table copytest3 (
+ c1 int,
+ "col with , comma" text,
+ "col with "" quote" int);
+
+copy copytest3 from stdin csv header;
+this is just a line full of junk that would error out if parsed
+1,a,1
+2,b,2
+\.
+
+copy copytest3 to stdout csv header;
+
+-- test copy from with a partitioned table
+create table parted_copytest (
+ a int,
+ b int,
+ c text
+) partition by list (b);
+
+create table parted_copytest_a1 (c text, b int, a int);
+create table parted_copytest_a2 (a int, c text, b int);
+
+alter table parted_copytest attach partition parted_copytest_a1 for values in(1);
+alter table parted_copytest attach partition parted_copytest_a2 for values in(2);
+
+-- We must insert enough rows to trigger multi-inserts. These are only
+-- enabled adaptively when there are few enough partition changes.
+insert into parted_copytest select x,1,'One' from generate_series(1,1000) x;
+insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x;
+insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x;
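+-- (the COPY back in below reads this data ordered by a, so the target
+-- partition changes only a couple of times across 1020 rows and the adaptive
+-- multi-insert path stays enabled)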
+
+copy (select * from parted_copytest order by a) to '@abs_builddir@/results/parted_copytest.csv';
+
+truncate parted_copytest;
+
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
+
+-- Ensure COPY FREEZE errors for partitioned tables.
+begin;
+truncate parted_copytest;
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv' (freeze);
+rollback;
+
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::name;
+
+truncate parted_copytest;
+
+-- create before insert row trigger on parted_copytest_a2
+create function part_ins_func() returns trigger language plpgsql as $$
+begin
+ return new;
+end;
+$$;
+
+create trigger part_ins_trig
+ before insert on parted_copytest_a2
+ for each row
+ execute procedure part_ins_func();
+
+copy parted_copytest from '@abs_builddir@/results/parted_copytest.csv';
+
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::name;
+
+truncate table parted_copytest;
+create index on parted_copytest (b);
+drop trigger part_ins_trig on parted_copytest_a2;
+
+copy parted_copytest from stdin;
+1 1 str1
+2 2 str2
+\.
+
+-- Ensure index entries were properly added during the copy.
+select * from parted_copytest where b = 1;
+select * from parted_copytest where b = 2;
+
+drop table parted_copytest;
diff --git a/src/test/regress/input/create_function_1.source b/src/test/regress/input/create_function_1.source
new file mode 100644
index 0000000..412e339
--- /dev/null
+++ b/src/test/regress/input/create_function_1.source
@@ -0,0 +1,105 @@
+--
+-- CREATE_FUNCTION_1
+--
+
+CREATE FUNCTION widget_in(cstring)
+ RETURNS widget
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT IMMUTABLE;
+
+CREATE FUNCTION widget_out(widget)
+ RETURNS cstring
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT IMMUTABLE;
+
+CREATE FUNCTION int44in(cstring)
+ RETURNS city_budget
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT IMMUTABLE;
+
+CREATE FUNCTION int44out(city_budget)
+ RETURNS cstring
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT IMMUTABLE;
+
+CREATE FUNCTION check_primary_key ()
+ RETURNS trigger
+ AS '@libdir@/refint@DLSUFFIX@'
+ LANGUAGE C;
+
+CREATE FUNCTION check_foreign_key ()
+ RETURNS trigger
+ AS '@libdir@/refint@DLSUFFIX@'
+ LANGUAGE C;
+
+CREATE FUNCTION autoinc ()
+ RETURNS trigger
+ AS '@libdir@/autoinc@DLSUFFIX@'
+ LANGUAGE C;
+
+CREATE FUNCTION trigger_return_old ()
+ RETURNS trigger
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C;
+
+CREATE FUNCTION ttdummy ()
+ RETURNS trigger
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C;
+
+CREATE FUNCTION set_ttdummy (int4)
+ RETURNS int4
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT;
+
+CREATE FUNCTION make_tuple_indirect (record)
+ RETURNS record
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT;
+
+CREATE FUNCTION test_atomic_ops()
+ RETURNS bool
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C;
+
+-- Tests creating a FDW handler
+CREATE FUNCTION test_fdw_handler()
+ RETURNS fdw_handler
+ AS '@libdir@/regress@DLSUFFIX@', 'test_fdw_handler'
+ LANGUAGE C;
+
+CREATE FUNCTION test_support_func(internal)
+ RETURNS internal
+ AS '@libdir@/regress@DLSUFFIX@', 'test_support_func'
+ LANGUAGE C STRICT;
+
+CREATE FUNCTION test_opclass_options_func(internal)
+ RETURNS void
+ AS '@libdir@/regress@DLSUFFIX@', 'test_opclass_options_func'
+ LANGUAGE C;
+
+-- Things that shouldn't work:
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
+ AS 'SELECT ''not an integer'';';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
+ AS 'not even SQL';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
+ AS 'SELECT 1, 2, 3;';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
+ AS 'SELECT $2;';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE SQL
+ AS 'a', 'b';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C
+ AS 'nosuchfile';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE C
+ AS '@libdir@/regress@DLSUFFIX@', 'nosuchsymbol';
+
+CREATE FUNCTION test1 (int) RETURNS int LANGUAGE internal
+ AS 'nosuch';
diff --git a/src/test/regress/input/create_function_2.source b/src/test/regress/input/create_function_2.source
new file mode 100644
index 0000000..9e6d294
--- /dev/null
+++ b/src/test/regress/input/create_function_2.source
@@ -0,0 +1,88 @@
+--
+-- CREATE_FUNCTION_2
+--
+CREATE FUNCTION hobbies(person)
+ RETURNS setof hobbies_r
+ AS 'select * from hobbies_r where person = $1.name'
+ LANGUAGE SQL;
+
+
+CREATE FUNCTION hobby_construct(text, text)
+ RETURNS hobbies_r
+ AS 'select $1 as name, $2 as hobby'
+ LANGUAGE SQL;
+
+
+CREATE FUNCTION hobby_construct_named(name text, hobby text)
+ RETURNS hobbies_r
+ AS 'select name, hobby'
+ LANGUAGE SQL;
+
+
+CREATE FUNCTION hobbies_by_name(hobbies_r.name%TYPE)
+ RETURNS hobbies_r.person%TYPE
+ AS 'select person from hobbies_r where name = $1'
+ LANGUAGE SQL;
+
+
+CREATE FUNCTION equipment(hobbies_r)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where hobby = $1.name'
+ LANGUAGE SQL;
+
+
+CREATE FUNCTION equipment_named(hobby hobbies_r)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where equipment_r.hobby = equipment_named.hobby.name'
+ LANGUAGE SQL;
+
+CREATE FUNCTION equipment_named_ambiguous_1a(hobby hobbies_r)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where hobby = equipment_named_ambiguous_1a.hobby.name'
+ LANGUAGE SQL;
+
+CREATE FUNCTION equipment_named_ambiguous_1b(hobby hobbies_r)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where equipment_r.hobby = hobby.name'
+ LANGUAGE SQL;
+
+CREATE FUNCTION equipment_named_ambiguous_1c(hobby hobbies_r)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where hobby = hobby.name'
+ LANGUAGE SQL;
+
+CREATE FUNCTION equipment_named_ambiguous_2a(hobby text)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where hobby = equipment_named_ambiguous_2a.hobby'
+ LANGUAGE SQL;
+
+CREATE FUNCTION equipment_named_ambiguous_2b(hobby text)
+ RETURNS setof equipment_r
+ AS 'select * from equipment_r where equipment_r.hobby = hobby'
+ LANGUAGE SQL;
+
+
+CREATE FUNCTION pt_in_widget(point, widget)
+ RETURNS bool
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT;
+
+CREATE FUNCTION overpaid(emp)
+ RETURNS bool
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT;
+
+CREATE FUNCTION interpt_pp(path, path)
+ RETURNS point
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT;
+
+CREATE FUNCTION reverse_name(name)
+ RETURNS name
+ AS '@libdir@/regress@DLSUFFIX@'
+ LANGUAGE C STRICT;
+
+--
+-- Function dynamic loading
+--
+LOAD '@libdir@/regress@DLSUFFIX@';
diff --git a/src/test/regress/input/largeobject.source b/src/test/regress/input/largeobject.source
new file mode 100644
index 0000000..ff42697
--- /dev/null
+++ b/src/test/regress/input/largeobject.source
@@ -0,0 +1,263 @@
+--
+-- Test large object support
+--
+
+-- ensure consistent test output regardless of the default bytea format
+SET bytea_output TO escape;
+
+-- Load a file
+CREATE TABLE lotest_stash_values (loid oid, fd integer);
+-- lo_creat(mode integer) returns oid
+-- The mode arg to lo_creat is unused; it is a vestigial holdover from ancient times
+-- returns the large object id
+INSERT INTO lotest_stash_values (loid) SELECT lo_creat(42);
+
+-- Test ALTER LARGE OBJECT
+CREATE ROLE regress_lo_user;
+DO $$
+ BEGIN
+ EXECUTE 'ALTER LARGE OBJECT ' || (select loid from lotest_stash_values)
+ || ' OWNER TO regress_lo_user';
+ END
+$$;
+SELECT
+ rol.rolname
+FROM
+ lotest_stash_values s
+ JOIN pg_largeobject_metadata lo ON s.loid = lo.oid
+ JOIN pg_authid rol ON lo.lomowner = rol.oid;
+
+-- NOTE: large objects require transactions
+BEGIN;
+
+-- lo_open(lobjId oid, mode integer) returns integer
+-- The mode parameter to lo_open uses two constants:
+-- INV_READ = 0x20000
+-- INV_WRITE = 0x40000
+-- The return value is a file descriptor-like value which remains valid for the
+-- transaction.
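+-- The CAST(x'20000' | x'40000' AS integer) used below therefore asks for both
+-- read and write access (0x60000, i.e. 393216).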
+UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer));
+
+-- loread/lowrite names are wonky, different from other functions which are lo_*
+-- lowrite(fd integer, data bytea) returns integer
+-- the integer is the number of bytes written
+SELECT lowrite(fd, '
+I wandered lonely as a cloud
+That floats on high o''er vales and hills,
+When all at once I saw a crowd,
+A host, of golden daffodils;
+Beside the lake, beneath the trees,
+Fluttering and dancing in the breeze.
+
+Continuous as the stars that shine
+And twinkle on the milky way,
+They stretched in never-ending line
+Along the margin of a bay:
+Ten thousand saw I at a glance,
+Tossing their heads in sprightly dance.
+
+The waves beside them danced; but they
+Out-did the sparkling waves in glee:
+A poet could not but be gay,
+In such a jocund company:
+I gazed--and gazed--but little thought
+What wealth the show to me had brought:
+
+For oft, when on my couch I lie
+In vacant or in pensive mood,
+They flash upon that inward eye
+Which is the bliss of solitude;
+And then my heart with pleasure fills,
+And dances with the daffodils.
+
+ -- William Wordsworth
+') FROM lotest_stash_values;
+
+-- lo_close(fd integer) returns integer
+-- return value is 0 for success, or <0 for error (actually only -1, but...)
+SELECT lo_close(fd) FROM lotest_stash_values;
+
+END;
+
+-- Copy to another large object.
+-- Note: we intentionally don't remove the object created here;
+-- it's left behind to help test pg_dump.
+
+SELECT lo_from_bytea(0, lo_get(loid)) AS newloid FROM lotest_stash_values
+\gset
+
+-- Add a comment to it, as well, for pg_dump/pg_upgrade testing.
+COMMENT ON LARGE OBJECT :newloid IS 'I Wandered Lonely as a Cloud';
+
+-- Read out a portion
+BEGIN;
+UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer));
+
+-- lo_lseek(fd integer, offset integer, whence integer) returns integer
+-- offset is in bytes, whence is one of three values:
+-- SEEK_SET (= 0) meaning relative to beginning
+-- SEEK_CUR (= 1) meaning relative to current position
+-- SEEK_END (= 2) meaning relative to end (offset better be negative)
+-- returns current position in file
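+-- The seeks below exercise all three whence values: an absolute seek to byte
+-- 104, a relative seek of -19, and a seek to 744 bytes before the end.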
+SELECT lo_lseek(fd, 104, 0) FROM lotest_stash_values;
+
+-- loread/lowrite names are wonky, different from other functions which are lo_*
+-- loread(fd integer, len integer) returns bytea
+SELECT loread(fd, 28) FROM lotest_stash_values;
+
+SELECT lo_lseek(fd, -19, 1) FROM lotest_stash_values;
+
+SELECT lowrite(fd, 'n') FROM lotest_stash_values;
+
+SELECT lo_tell(fd) FROM lotest_stash_values;
+
+SELECT lo_lseek(fd, -744, 2) FROM lotest_stash_values;
+
+SELECT loread(fd, 28) FROM lotest_stash_values;
+
+SELECT lo_close(fd) FROM lotest_stash_values;
+
+END;
+
+-- Test resource management
+BEGIN;
+SELECT lo_open(loid, x'40000'::int) from lotest_stash_values;
+ABORT;
+
+-- Test truncation.
+BEGIN;
+UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer));
+
+SELECT lo_truncate(fd, 11) FROM lotest_stash_values;
+SELECT loread(fd, 15) FROM lotest_stash_values;
+
+SELECT lo_truncate(fd, 10000) FROM lotest_stash_values;
+SELECT loread(fd, 10) FROM lotest_stash_values;
+SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values;
+SELECT lo_tell(fd) FROM lotest_stash_values;
+
+SELECT lo_truncate(fd, 5000) FROM lotest_stash_values;
+SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values;
+SELECT lo_tell(fd) FROM lotest_stash_values;
+
+SELECT lo_close(fd) FROM lotest_stash_values;
+END;
+
+-- Test 64-bit large object functions.
+BEGIN;
+UPDATE lotest_stash_values SET fd = lo_open(loid, CAST(x'20000' | x'40000' AS integer));
+
+SELECT lo_lseek64(fd, 4294967296, 0) FROM lotest_stash_values;
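+-- (4294967296 = 2^32 bytes = 4GB, beyond what a signed 32-bit offset can
+-- address, hence the 64-bit variants)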
+SELECT lowrite(fd, 'offset:4GB') FROM lotest_stash_values;
+SELECT lo_tell64(fd) FROM lotest_stash_values;
+
+SELECT lo_lseek64(fd, -10, 1) FROM lotest_stash_values;
+SELECT lo_tell64(fd) FROM lotest_stash_values;
+SELECT loread(fd, 10) FROM lotest_stash_values;
+
+SELECT lo_truncate64(fd, 5000000000) FROM lotest_stash_values;
+SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values;
+SELECT lo_tell64(fd) FROM lotest_stash_values;
+
+SELECT lo_truncate64(fd, 3000000000) FROM lotest_stash_values;
+SELECT lo_lseek64(fd, 0, 2) FROM lotest_stash_values;
+SELECT lo_tell64(fd) FROM lotest_stash_values;
+
+SELECT lo_close(fd) FROM lotest_stash_values;
+END;
+
+-- lo_unlink(lobjId oid) returns integer
+-- return value appears to always be 1
+SELECT lo_unlink(loid) from lotest_stash_values;
+
+TRUNCATE lotest_stash_values;
+
+INSERT INTO lotest_stash_values (loid) SELECT lo_import('@abs_srcdir@/data/tenk.data');
+
+BEGIN;
+UPDATE lotest_stash_values SET fd=lo_open(loid, CAST(x'20000' | x'40000' AS integer));
+
+-- verify length of large object
+SELECT lo_lseek(fd, 0, 2) FROM lotest_stash_values;
+
+-- with the default BLCKSZ, LOBLKSIZE = 2048, so this positions us for a block
+-- edge case
+SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values;
+
+-- this should get half of the value from page 0 and half from page 1 of the
+-- large object
+SELECT loread(fd, 36) FROM lotest_stash_values;
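+-- (the read spans bytes 2030..2065, i.e. 18 bytes from the end of page 0 and
+-- 18 bytes from the start of page 1 with a 2048-byte LOBLKSIZE)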
+
+SELECT lo_tell(fd) FROM lotest_stash_values;
+
+SELECT lo_lseek(fd, -26, 1) FROM lotest_stash_values;
+
+SELECT lowrite(fd, 'abcdefghijklmnop') FROM lotest_stash_values;
+
+SELECT lo_lseek(fd, 2030, 0) FROM lotest_stash_values;
+
+SELECT loread(fd, 36) FROM lotest_stash_values;
+
+SELECT lo_close(fd) FROM lotest_stash_values;
+END;
+
+SELECT lo_export(loid, '@abs_builddir@/results/lotest.txt') FROM lotest_stash_values;
+
+\lo_import '@abs_builddir@/results/lotest.txt'
+
+\set newloid :LASTOID
+
+-- just make sure \lo_export does not barf
+\lo_export :newloid '@abs_builddir@/results/lotest2.txt'
+
+-- This is a hack to test that export/import are reversible
+-- This uses knowledge about the inner workings of large object mechanism
+-- which should not be used outside it. This makes it a HACK
+SELECT pageno, data FROM pg_largeobject WHERE loid = (SELECT loid from lotest_stash_values)
+EXCEPT
+SELECT pageno, data FROM pg_largeobject WHERE loid = :newloid;
+
+SELECT lo_unlink(loid) FROM lotest_stash_values;
+
+TRUNCATE lotest_stash_values;
+
+\lo_unlink :newloid
+
+\lo_import '@abs_builddir@/results/lotest.txt'
+
+\set newloid_1 :LASTOID
+
+SELECT lo_from_bytea(0, lo_get(:newloid_1)) AS newloid_2
+\gset
+
+SELECT md5(lo_get(:newloid_1)) = md5(lo_get(:newloid_2));
+
+SELECT lo_get(:newloid_1, 0, 20);
+SELECT lo_get(:newloid_1, 10, 20);
+SELECT lo_put(:newloid_1, 5, decode('afafafaf', 'hex'));
+SELECT lo_get(:newloid_1, 0, 20);
+
+SELECT lo_put(:newloid_1, 4294967310, 'foo');
+SELECT lo_get(:newloid_1);
+SELECT lo_get(:newloid_1, 4294967294, 100);
+
+\lo_unlink :newloid_1
+\lo_unlink :newloid_2
+
+-- This object is left in the database for pg_dump test purposes
+SELECT lo_from_bytea(0, E'\\xdeadbeef') AS newloid
+\gset
+
+SET bytea_output TO hex;
+SELECT lo_get(:newloid);
+
+-- Create one more object that we leave behind for testing pg_dump/pg_upgrade;
+-- this one intentionally has an OID in the system range
+SELECT lo_create(2121);
+
+COMMENT ON LARGE OBJECT 2121 IS 'testing comments';
+
+-- Clean up
+DROP TABLE lotest_stash_values;
+
+DROP ROLE regress_lo_user;
diff --git a/src/test/regress/input/misc.source b/src/test/regress/input/misc.source
new file mode 100644
index 0000000..b1dbc57
--- /dev/null
+++ b/src/test/regress/input/misc.source
@@ -0,0 +1,262 @@
+--
+-- MISC
+--
+
+--
+-- BTREE
+--
+UPDATE onek
+ SET unique1 = onek.unique1 + 1;
+
+UPDATE onek
+ SET unique1 = onek.unique1 - 1;
+
+--
+-- BTREE partial
+--
+-- UPDATE onek2
+-- SET unique1 = onek2.unique1 + 1;
+
+--UPDATE onek2
+-- SET unique1 = onek2.unique1 - 1;
+
+--
+-- BTREE shutting out non-functional updates
+--
+-- the following two tests seem to take a long time on some
+-- systems. This non-func update stuff needs to be examined
+-- more closely. - jolly (2/22/96)
+--
+UPDATE tmp
+ SET stringu1 = reverse_name(onek.stringu1)
+ FROM onek
+ WHERE onek.stringu1 = 'JBAAAA' and
+ onek.stringu1 = tmp.stringu1;
+
+UPDATE tmp
+ SET stringu1 = reverse_name(onek2.stringu1)
+ FROM onek2
+ WHERE onek2.stringu1 = 'JCAAAA' and
+ onek2.stringu1 = tmp.stringu1;
+
+DROP TABLE tmp;
+
+--UPDATE person*
+-- SET age = age + 1;
+
+--UPDATE person*
+-- SET age = age + 3
+-- WHERE name = 'linda';
+
+--
+-- copy
+--
+COPY onek TO '@abs_builddir@/results/onek.data';
+
+DELETE FROM onek;
+
+COPY onek FROM '@abs_builddir@/results/onek.data';
+
+SELECT unique1 FROM onek WHERE unique1 < 2 ORDER BY unique1;
+
+DELETE FROM onek2;
+
+COPY onek2 FROM '@abs_builddir@/results/onek.data';
+
+SELECT unique1 FROM onek2 WHERE unique1 < 2 ORDER BY unique1;
+
+COPY BINARY stud_emp TO '@abs_builddir@/results/stud_emp.data';
+
+DELETE FROM stud_emp;
+
+COPY BINARY stud_emp FROM '@abs_builddir@/results/stud_emp.data';
+
+SELECT * FROM stud_emp;
+
+-- COPY aggtest FROM stdin;
+-- 56 7.8
+-- 100 99.097
+-- 0 0.09561
+-- 42 324.78
+-- .
+-- COPY aggtest TO stdout;
+
+
+--
+-- inheritance stress test
+--
+SELECT * FROM a_star*;
+
+SELECT *
+ FROM b_star* x
+ WHERE x.b = text 'bumble' or x.a < 3;
+
+SELECT class, a
+ FROM c_star* x
+ WHERE x.c ~ text 'hi';
+
+SELECT class, b, c
+ FROM d_star* x
+ WHERE x.a < 100;
+
+SELECT class, c FROM e_star* x WHERE x.c NOTNULL;
+
+SELECT * FROM f_star* x WHERE x.c ISNULL;
+
+-- grouping and aggregation on inherited sets have been busted in the past...
+
+SELECT sum(a) FROM a_star*;
+
+SELECT class, sum(a) FROM a_star* GROUP BY class ORDER BY class;
+
+
+ALTER TABLE f_star RENAME COLUMN f TO ff;
+
+ALTER TABLE e_star* RENAME COLUMN e TO ee;
+
+ALTER TABLE d_star* RENAME COLUMN d TO dd;
+
+ALTER TABLE c_star* RENAME COLUMN c TO cc;
+
+ALTER TABLE b_star* RENAME COLUMN b TO bb;
+
+ALTER TABLE a_star* RENAME COLUMN a TO aa;
+
+SELECT class, aa
+ FROM a_star* x
+ WHERE aa ISNULL;
+
+-- As of Postgres 7.1, ALTER implicitly recurses,
+-- so this should be same as ALTER a_star*
+
+ALTER TABLE a_star RENAME COLUMN aa TO foo;
+
+SELECT class, foo
+ FROM a_star* x
+ WHERE x.foo >= 2;
+
+ALTER TABLE a_star RENAME COLUMN foo TO aa;
+
+SELECT *
+ from a_star*
+ WHERE aa < 1000;
+
+ALTER TABLE f_star ADD COLUMN f int4;
+
+UPDATE f_star SET f = 10;
+
+ALTER TABLE e_star* ADD COLUMN e int4;
+
+--UPDATE e_star* SET e = 42;
+
+SELECT * FROM e_star*;
+
+ALTER TABLE a_star* ADD COLUMN a text;
+
+-- That ALTER TABLE should have added TOAST tables.
+SELECT relname, reltoastrelid <> 0 AS has_toast_table
+ FROM pg_class
+ WHERE oid::regclass IN ('a_star', 'c_star')
+ ORDER BY 1;
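+-- (adding a column of a varlena type such as text makes rows potentially wide
+-- enough to need out-of-line storage, hence the new TOAST tables)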
+
+--UPDATE b_star*
+-- SET a = text 'gazpacho'
+-- WHERE aa > 4;
+
+SELECT class, aa, a FROM a_star*;
+
+
+--
+-- versions
+--
+
+--
+-- postquel functions
+--
+--
+-- mike does post_hacking,
+-- joe and sally play basketball, and
+-- everyone else does nothing.
+--
+SELECT p.name, name(p.hobbies) FROM ONLY person p;
+
+--
+-- as above, but jeff also does post_hacking.
+--
+SELECT p.name, name(p.hobbies) FROM person* p;
+
+--
+-- the next two queries demonstrate how functions generate bogus duplicates.
+-- this is a "feature" ..
+--
+SELECT DISTINCT hobbies_r.name, name(hobbies_r.equipment) FROM hobbies_r
+ ORDER BY 1,2;
+
+SELECT hobbies_r.name, (hobbies_r.equipment).name FROM hobbies_r;
+
+--
+-- mike needs advil and peet's coffee,
+-- joe and sally need hightops, and
+-- everyone else is fine.
+--
+SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM ONLY person p;
+
+--
+-- as above, but jeff needs advil and peet's coffee as well.
+--
+SELECT p.name, name(p.hobbies), name(equipment(p.hobbies)) FROM person* p;
+
+--
+-- just like the last two, but make sure that the target list fixup and
+-- unflattening is being done correctly.
+--
+SELECT name(equipment(p.hobbies)), p.name, name(p.hobbies) FROM ONLY person p;
+
+SELECT (p.hobbies).equipment.name, p.name, name(p.hobbies) FROM person* p;
+
+SELECT (p.hobbies).equipment.name, name(p.hobbies), p.name FROM ONLY person p;
+
+SELECT name(equipment(p.hobbies)), name(p.hobbies), p.name FROM person* p;
+
+SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer')));
+
+SELECT name(equipment(hobby_construct_named(text 'skywalking', text 'mer')));
+
+SELECT name(equipment_named(hobby_construct_named(text 'skywalking', text 'mer')));
+
+SELECT name(equipment_named_ambiguous_1a(hobby_construct_named(text 'skywalking', text 'mer')));
+
+SELECT name(equipment_named_ambiguous_1b(hobby_construct_named(text 'skywalking', text 'mer')));
+
+SELECT name(equipment_named_ambiguous_1c(hobby_construct_named(text 'skywalking', text 'mer')));
+
+SELECT name(equipment_named_ambiguous_2a(text 'skywalking'));
+
+SELECT name(equipment_named_ambiguous_2b(text 'skywalking'));
+
+SELECT hobbies_by_name('basketball');
+
+SELECT name, overpaid(emp.*) FROM emp;
+
+--
+-- Try a few cases with SQL-spec row constructor expressions
+--
+SELECT * FROM equipment(ROW('skywalking', 'mer'));
+
+SELECT name(equipment(ROW('skywalking', 'mer')));
+
+SELECT *, name(equipment(h.*)) FROM hobbies_r h;
+
+SELECT *, (equipment(CAST((h.*) AS hobbies_r))).name FROM hobbies_r h;
+
+--
+-- functional joins
+--
+
+--
+-- instance rules
+--
+
+--
+-- rewrite rules
+--
diff --git a/src/test/regress/input/tablespace.source b/src/test/regress/input/tablespace.source
new file mode 100644
index 0000000..1a18101
--- /dev/null
+++ b/src/test/regress/input/tablespace.source
@@ -0,0 +1,288 @@
+-- create a tablespace using WITH clause
+CREATE TABLESPACE regress_tblspacewith LOCATION '@testtablespace@' WITH (some_nonexistent_parameter = true); -- fail
+CREATE TABLESPACE regress_tblspacewith LOCATION '@testtablespace@' WITH (random_page_cost = 3.0); -- ok
+
+-- check to see the parameter was used
+SELECT spcoptions FROM pg_tablespace WHERE spcname = 'regress_tblspacewith';
+
+-- drop the tablespace so we can re-use the location
+DROP TABLESPACE regress_tblspacewith;
+
+-- create a tablespace we can use
+CREATE TABLESPACE regress_tblspace LOCATION '@testtablespace@';
+
+-- try setting and resetting some properties for the new tablespace
+ALTER TABLESPACE regress_tblspace SET (random_page_cost = 1.0, seq_page_cost = 1.1);
+ALTER TABLESPACE regress_tblspace SET (some_nonexistent_parameter = true); -- fail
+ALTER TABLESPACE regress_tblspace RESET (random_page_cost = 2.0); -- fail
+ALTER TABLESPACE regress_tblspace RESET (random_page_cost, effective_io_concurrency); -- ok
+
+-- create a schema we can use
+CREATE SCHEMA testschema;
+
+-- try a table
+CREATE TABLE testschema.foo (i int) TABLESPACE regress_tblspace;
+SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
+ where c.reltablespace = t.oid AND c.relname = 'foo';
+
+INSERT INTO testschema.foo VALUES(1);
+INSERT INTO testschema.foo VALUES(2);
+
+-- tables from dynamic sources
+CREATE TABLE testschema.asselect TABLESPACE regress_tblspace AS SELECT 1;
+SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
+ where c.reltablespace = t.oid AND c.relname = 'asselect';
+
+PREPARE selectsource(int) AS SELECT $1;
+CREATE TABLE testschema.asexecute TABLESPACE regress_tblspace
+ AS EXECUTE selectsource(2);
+SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
+ where c.reltablespace = t.oid AND c.relname = 'asexecute';
+
+-- index
+CREATE INDEX foo_idx on testschema.foo(i) TABLESPACE regress_tblspace;
+SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
+ where c.reltablespace = t.oid AND c.relname = 'foo_idx';
+
+-- check \d output
+\d testschema.foo
+\d testschema.foo_idx
+
+--
+-- partitioned table
+--
+CREATE TABLE testschema.part (a int) PARTITION BY LIST (a);
+SET default_tablespace TO pg_global;
+CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1);
+RESET default_tablespace;
+CREATE TABLE testschema.part_1 PARTITION OF testschema.part FOR VALUES IN (1);
+SET default_tablespace TO regress_tblspace;
+CREATE TABLE testschema.part_2 PARTITION OF testschema.part FOR VALUES IN (2);
+SET default_tablespace TO pg_global;
+CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3);
+ALTER TABLE testschema.part SET TABLESPACE regress_tblspace;
+CREATE TABLE testschema.part_3 PARTITION OF testschema.part FOR VALUES IN (3);
+CREATE TABLE testschema.part_4 PARTITION OF testschema.part FOR VALUES IN (4)
+ TABLESPACE pg_default;
+CREATE TABLE testschema.part_56 PARTITION OF testschema.part FOR VALUES IN (5, 6)
+ PARTITION BY LIST (a);
+ALTER TABLE testschema.part SET TABLESPACE pg_default;
+CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8)
+ PARTITION BY LIST (a);
+CREATE TABLE testschema.part_910 PARTITION OF testschema.part FOR VALUES IN (9, 10)
+ PARTITION BY LIST (a) TABLESPACE regress_tblspace;
+RESET default_tablespace;
+CREATE TABLE testschema.part_78 PARTITION OF testschema.part FOR VALUES IN (7, 8)
+ PARTITION BY LIST (a);
+
+SELECT relname, spcname FROM pg_catalog.pg_class c
+ JOIN pg_catalog.pg_namespace n ON (c.relnamespace = n.oid)
+ LEFT JOIN pg_catalog.pg_tablespace t ON c.reltablespace = t.oid
+ where c.relname LIKE 'part%' AND n.nspname = 'testschema' order by relname;
+RESET default_tablespace;
+DROP TABLE testschema.part;
+
+-- partitioned index
+CREATE TABLE testschema.part (a int) PARTITION BY LIST (a);
+CREATE TABLE testschema.part1 PARTITION OF testschema.part FOR VALUES IN (1);
+CREATE INDEX part_a_idx ON testschema.part (a) TABLESPACE regress_tblspace;
+CREATE TABLE testschema.part2 PARTITION OF testschema.part FOR VALUES IN (2);
+SELECT relname, spcname FROM pg_catalog.pg_tablespace t, pg_catalog.pg_class c
+ where c.reltablespace = t.oid AND c.relname LIKE 'part%_idx';
+\d testschema.part
+\d+ testschema.part
+\d testschema.part1
+\d+ testschema.part1
+\d testschema.part_a_idx
+\d+ testschema.part_a_idx
+
+-- partitioned rels cannot specify the default tablespace. These fail:
+CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE pg_default;
+CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE pg_default) PARTITION BY LIST (a);
+SET default_tablespace TO 'pg_default';
+CREATE TABLE testschema.dflt (a int PRIMARY KEY) PARTITION BY LIST (a) TABLESPACE regress_tblspace;
+CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a);
+-- but these work:
+CREATE TABLE testschema.dflt (a int PRIMARY KEY USING INDEX TABLESPACE regress_tblspace) PARTITION BY LIST (a) TABLESPACE regress_tblspace;
+SET default_tablespace TO '';
+CREATE TABLE testschema.dflt2 (a int PRIMARY KEY) PARTITION BY LIST (a);
+DROP TABLE testschema.dflt, testschema.dflt2;
+
+-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds
+CREATE TABLE testschema.test_default_tab(id bigint) TABLESPACE regress_tblspace;
+INSERT INTO testschema.test_default_tab VALUES (1);
+CREATE INDEX test_index1 on testschema.test_default_tab (id);
+CREATE INDEX test_index2 on testschema.test_default_tab (id) TABLESPACE regress_tblspace;
+ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index3 PRIMARY KEY (id);
+ALTER TABLE testschema.test_default_tab ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace;
+
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+-- use a custom tablespace for default_tablespace
+SET default_tablespace TO regress_tblspace;
+-- tablespace should not change if no rewrite
+ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+SELECT * FROM testschema.test_default_tab;
+-- tablespace should not change even if there is an index rewrite
+ALTER TABLE testschema.test_default_tab ALTER id TYPE int;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+SELECT * FROM testschema.test_default_tab;
+-- now use the default tablespace for default_tablespace
+SET default_tablespace TO '';
+-- tablespace should not change if no rewrite
+ALTER TABLE testschema.test_default_tab ALTER id TYPE int;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+-- tablespace should not change even if there is an index rewrite
+ALTER TABLE testschema.test_default_tab ALTER id TYPE bigint;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+DROP TABLE testschema.test_default_tab;
+
+-- check that default_tablespace doesn't affect ALTER TABLE index rebuilds
+-- (this time with a partitioned table)
+CREATE TABLE testschema.test_default_tab_p(id bigint, val bigint)
+ PARTITION BY LIST (id) TABLESPACE regress_tblspace;
+CREATE TABLE testschema.test_default_tab_p1 PARTITION OF testschema.test_default_tab_p
+ FOR VALUES IN (1);
+INSERT INTO testschema.test_default_tab_p VALUES (1);
+CREATE INDEX test_index1 on testschema.test_default_tab_p (val);
+CREATE INDEX test_index2 on testschema.test_default_tab_p (val) TABLESPACE regress_tblspace;
+ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index3 PRIMARY KEY (id);
+ALTER TABLE testschema.test_default_tab_p ADD CONSTRAINT test_index4 UNIQUE (id) USING INDEX TABLESPACE regress_tblspace;
+
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+-- use a custom tablespace for default_tablespace
+SET default_tablespace TO regress_tblspace;
+-- tablespace should not change if no rewrite
+ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+SELECT * FROM testschema.test_default_tab_p;
+-- tablespace should not change even if there is an index rewrite
+ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+SELECT * FROM testschema.test_default_tab_p;
+-- now use the default tablespace for default_tablespace
+SET default_tablespace TO '';
+-- tablespace should not change if no rewrite
+ALTER TABLE testschema.test_default_tab_p ALTER val TYPE int;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+-- tablespace should not change even if there is an index rewrite
+ALTER TABLE testschema.test_default_tab_p ALTER val TYPE bigint;
+\d testschema.test_index1
+\d testschema.test_index2
+\d testschema.test_index3
+\d testschema.test_index4
+DROP TABLE testschema.test_default_tab_p;
+
+-- check that default_tablespace affects index additions in ALTER TABLE
+CREATE TABLE testschema.test_tab(id int) TABLESPACE regress_tblspace;
+INSERT INTO testschema.test_tab VALUES (1);
+SET default_tablespace TO regress_tblspace;
+ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (id);
+SET default_tablespace TO '';
+ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_pkey PRIMARY KEY (id);
+\d testschema.test_tab_unique
+\d testschema.test_tab_pkey
+SELECT * FROM testschema.test_tab;
+DROP TABLE testschema.test_tab;
+
+-- check that default_tablespace is handled correctly by multi-command
+-- ALTER TABLE that includes a tablespace-preserving rewrite
+CREATE TABLE testschema.test_tab(a int, b int, c int);
+SET default_tablespace TO regress_tblspace;
+ALTER TABLE testschema.test_tab ADD CONSTRAINT test_tab_unique UNIQUE (a);
+CREATE INDEX test_tab_a_idx ON testschema.test_tab (a);
+SET default_tablespace TO '';
+CREATE INDEX test_tab_b_idx ON testschema.test_tab (b);
+\d testschema.test_tab_unique
+\d testschema.test_tab_a_idx
+\d testschema.test_tab_b_idx
+ALTER TABLE testschema.test_tab ALTER b TYPE bigint, ADD UNIQUE (c);
+\d testschema.test_tab_unique
+\d testschema.test_tab_a_idx
+\d testschema.test_tab_b_idx
+DROP TABLE testschema.test_tab;
+
+-- let's try moving a table from one place to another
+CREATE TABLE testschema.atable AS VALUES (1), (2);
+CREATE UNIQUE INDEX anindex ON testschema.atable(column1);
+
+ALTER TABLE testschema.atable SET TABLESPACE regress_tblspace;
+ALTER INDEX testschema.anindex SET TABLESPACE regress_tblspace;
+ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_global;
+ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default;
+ALTER INDEX testschema.part_a_idx SET TABLESPACE regress_tblspace;
+
+INSERT INTO testschema.atable VALUES(3); -- ok
+INSERT INTO testschema.atable VALUES(1); -- fail (checks index)
+SELECT COUNT(*) FROM testschema.atable; -- checks heap
+
+-- Will fail with bad path
+CREATE TABLESPACE regress_badspace LOCATION '/no/such/location';
+
+-- No such tablespace
+CREATE TABLE bar (i int) TABLESPACE regress_nosuchspace;
+
+-- Fail, in use for some partitioned object
+DROP TABLESPACE regress_tblspace;
+ALTER INDEX testschema.part_a_idx SET TABLESPACE pg_default;
+-- Fail, not empty
+DROP TABLESPACE regress_tblspace;
+
+CREATE ROLE regress_tablespace_user1 login;
+CREATE ROLE regress_tablespace_user2 login;
+GRANT USAGE ON SCHEMA testschema TO regress_tablespace_user2;
+
+ALTER TABLESPACE regress_tblspace OWNER TO regress_tablespace_user1;
+
+CREATE TABLE testschema.tablespace_acl (c int);
+-- new owner lacks permission to create this index from scratch
+CREATE INDEX k ON testschema.tablespace_acl (c) TABLESPACE regress_tblspace;
+ALTER TABLE testschema.tablespace_acl OWNER TO regress_tablespace_user2;
+
+SET SESSION ROLE regress_tablespace_user2;
+CREATE TABLE tablespace_table (i int) TABLESPACE regress_tblspace; -- fail
+ALTER TABLE testschema.tablespace_acl ALTER c TYPE bigint;
+RESET ROLE;
+
+ALTER TABLESPACE regress_tblspace RENAME TO regress_tblspace_renamed;
+
+ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+ALTER INDEX ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+
+-- Should show notice that nothing was done
+ALTER TABLE ALL IN TABLESPACE regress_tblspace_renamed SET TABLESPACE pg_default;
+
+-- Should succeed
+DROP TABLESPACE regress_tblspace_renamed;
+
+DROP SCHEMA testschema CASCADE;
+
+DROP ROLE regress_tablespace_user1;
+DROP ROLE regress_tablespace_user2;