Diffstat:
 src/test/regress/expected/copy.out | 296 ++++++++++++++++++++++++++++++++++
 1 file changed, 296 insertions(+), 0 deletions(-)
diff --git a/src/test/regress/expected/copy.out b/src/test/regress/expected/copy.out
new file mode 100644
index 0000000..b48365e
--- /dev/null
+++ b/src/test/regress/expected/copy.out
@@ -0,0 +1,296 @@
+--
+-- COPY
+--
+-- directory paths are passed to us in environment variables
+\getenv abs_srcdir PG_ABS_SRCDIR
+\getenv abs_builddir PG_ABS_BUILDDIR
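+-- (For illustration: \getenv copies an environment variable into a psql
+-- variable, and \set can then concatenate it with literal text, e.g.
+--   \set filename :abs_builddir '/results/copytest.csv'
+-- as done below.)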
+-- test copying in CSV mode with various styles
+-- of embedded line ending characters
+create temp table copytest (
+ style text,
+ test text,
+ filler int);
+insert into copytest values('DOS',E'abc\r\ndef',1);
+insert into copytest values('Unix',E'abc\ndef',2);
+insert into copytest values('Mac',E'abc\rdef',3);
+insert into copytest values(E'esc\\ape',E'a\\r\\\r\\\n\\nb',4);
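+-- (Illustration, not test output: in CSV mode, fields containing CR or LF
+-- must be quoted, so the 'DOS' row round-trips as something like
+--   DOS,"abc<CR><LF>def",1
+-- with the line-ending bytes preserved verbatim inside the quotes.)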
+\set filename :abs_builddir '/results/copytest.csv'
+copy copytest to :'filename' csv;
+create temp table copytest2 (like copytest);
+copy copytest2 from :'filename' csv;
+select * from copytest except select * from copytest2;
+ style | test | filler
+-------+------+--------
+(0 rows)
+
+truncate copytest2;
+-- same test but with an escape char different from quote char
+copy copytest to :'filename' csv quote '''' escape E'\\';
+copy copytest2 from :'filename' csv quote '''' escape E'\\';
+select * from copytest except select * from copytest2;
+ style | test | filler
+-------+------+--------
+(0 rows)
+
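+-- (Illustrative sketch of the previous test's output format: with quote
+-- '''' and escape E'\\', any quote or escape character in the data is
+-- preceded by the escape character, so a value such as esc\ape is written
+-- roughly as 'esc\\ape'.)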
+-- test header line feature
+create temp table copytest3 (
+ c1 int,
+ "col with , comma" text,
+ "col with "" quote" int);
+copy copytest3 from stdin csv header;
+copy copytest3 to stdout csv header;
+c1,"col with , comma","col with "" quote"
+1,a,1
+2,b,2
+create temp table copytest4 (
+ c1 int,
+ "colname with tab: " text);
+copy copytest4 from stdin (header);
+copy copytest4 to stdout (header);
+c1 colname with tab: \t
+1 a
+2 b
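+-- Note the header line above: in text format, column names in the header
+-- are escaped the same way as data values, so the literal tab inside the
+-- column name is emitted as \t.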
+-- test copy from with a partitioned table
+create table parted_copytest (
+ a int,
+ b int,
+ c text
+) partition by list (b);
+create table parted_copytest_a1 (c text, b int, a int);
+create table parted_copytest_a2 (a int, c text, b int);
+alter table parted_copytest attach partition parted_copytest_a1 for values in(1);
+alter table parted_copytest attach partition parted_copytest_a2 for values in(2);
+-- We must insert enough rows to trigger multi-inserts. These are only
+-- enabled adaptively when there are few enough partition changes.
+insert into parted_copytest select x,1,'One' from generate_series(1,1000) x;
+insert into parted_copytest select x,2,'Two' from generate_series(1001,1010) x;
+insert into parted_copytest select x,1,'One' from generate_series(1011,1020) x;
+\set filename :abs_builddir '/results/parted_copytest.csv'
+copy (select * from parted_copytest order by a) to :'filename';
+truncate parted_copytest;
+copy parted_copytest from :'filename';
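+-- (Because the file was written ordered by a, the target partition changes
+-- only twice during this COPY, at ids 1001 and 1011, which is few enough
+-- for the adaptive logic to keep multi-inserts enabled.)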
+-- Ensure COPY FREEZE errors for partitioned tables.
+begin;
+truncate parted_copytest;
+copy parted_copytest from :'filename' (freeze);
+ERROR: cannot perform COPY FREEZE on a partitioned table
+rollback;
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::text;
+ tableoid | count | sum
+--------------------+-------+--------
+ parted_copytest_a1 | 1010 | 510655
+ parted_copytest_a2 | 10 | 10055
+(2 rows)
+
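+-- (Arithmetic check of the sums above: a1 holds ids 1..1000 and 1011..1020,
+-- giving 500500 + 10155 = 510655; a2 holds ids 1001..1010, giving 10055.)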
+truncate parted_copytest;
+-- create before insert row trigger on parted_copytest_a2
+create function part_ins_func() returns trigger language plpgsql as $$
+begin
+ return new;
+end;
+$$;
+create trigger part_ins_trig
+ before insert on parted_copytest_a2
+ for each row
+ execute procedure part_ins_func();
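+-- (The do-nothing row trigger matters because COPY does not use
+-- multi-inserts for partitions with before-insert row triggers, so this
+-- reload exercises the single-insert path for parted_copytest_a2.)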
+copy parted_copytest from :'filename';
+select tableoid::regclass,count(*),sum(a) from parted_copytest
+group by tableoid order by tableoid::regclass::text;
+ tableoid | count | sum
+--------------------+-------+--------
+ parted_copytest_a1 | 1010 | 510655
+ parted_copytest_a2 | 10 | 10055
+(2 rows)
+
+truncate table parted_copytest;
+create index on parted_copytest (b);
+drop trigger part_ins_trig on parted_copytest_a2;
+copy parted_copytest from stdin;
+-- Ensure index entries were properly added during the copy.
+select * from parted_copytest where b = 1;
+ a | b | c
+---+---+------
+ 1 | 1 | str1
+(1 row)
+
+select * from parted_copytest where b = 2;
+ a | b | c
+---+---+------
+ 2 | 2 | str2
+(1 row)
+
+drop table parted_copytest;
+--
+-- Progress reporting for COPY
+--
+create table tab_progress_reporting (
+ name text,
+ age int4,
+ location point,
+ salary int4,
+ manager name
+);
+-- Add a trigger to catch and print the contents of the catalog view
+-- pg_stat_progress_copy during data insertion. This makes it possible to
+-- test the validation of some progress reports for COPY FROM where the
+-- trigger fires.
+create function notice_after_tab_progress_reporting() returns trigger AS
+$$
+declare report record;
+begin
+ -- The fields ignored here are the ones that may not remain
+ -- consistent across multiple runs. The sizes reported may differ
+ -- across platforms, so just check that these are strictly positive.
+ with progress_data as (
+ select
+ relid::regclass::text as relname,
+ command,
+ type,
+ bytes_processed > 0 as has_bytes_processed,
+ bytes_total > 0 as has_bytes_total,
+ tuples_processed,
+ tuples_excluded
+ from pg_stat_progress_copy
+ where pid = pg_backend_pid())
+ select into report (to_jsonb(r)) as value
+ from progress_data r;
+
+ raise info 'progress: %', report.value::text;
+ return new;
+end;
+$$ language plpgsql;
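+-- (to_jsonb() folds the whole report row into a single value, so the INFO
+-- lines below print the fields in jsonb's canonical key order rather than
+-- in the view's column order.)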
+create trigger check_after_tab_progress_reporting
+ after insert on tab_progress_reporting
+ for each statement
+ execute function notice_after_tab_progress_reporting();
+-- Generate COPY FROM report with PIPE.
+copy tab_progress_reporting from stdin;
+INFO: progress: {"type": "PIPE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": false, "tuples_excluded": 0, "tuples_processed": 3, "has_bytes_processed": true}
+-- Generate COPY FROM report with FILE, with some excluded tuples.
+truncate tab_progress_reporting;
+\set filename :abs_srcdir '/data/emp.data'
+copy tab_progress_reporting from :'filename'
+ where (salary < 2000);
+INFO: progress: {"type": "FILE", "command": "COPY FROM", "relname": "tab_progress_reporting", "has_bytes_total": true, "tuples_excluded": 1, "tuples_processed": 2, "has_bytes_processed": true}
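+-- (tuples_excluded = 1 above reflects the WHERE clause: of the three rows
+-- read from emp.data, one did not satisfy salary < 2000 and was filtered
+-- out.)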
+drop trigger check_after_tab_progress_reporting on tab_progress_reporting;
+drop function notice_after_tab_progress_reporting();
+drop table tab_progress_reporting;
+-- Test header matching feature
+create table header_copytest (
+ a int,
+ b int,
+ c text
+);
+-- Make sure it works with dropped columns
+alter table header_copytest drop column c;
+alter table header_copytest add column c text;
+copy header_copytest to stdout with (header match);
+ERROR: cannot use "match" with HEADER in COPY TO
+copy header_copytest from stdin with (header wrong_choice);
+ERROR: header requires a Boolean value or "match"
+-- works
+copy header_copytest from stdin with (header match);
+copy header_copytest (c, a, b) from stdin with (header match);
+copy header_copytest from stdin with (header match, format csv);
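+-- (The stdin data is not echoed in expected output; for these to succeed,
+-- each input presumably starts with a header line naming the columns in
+-- column-list order, e.g. a<tab>b<tab>c for the first one.)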
+-- errors
+copy header_copytest (c, b, a) from stdin with (header match);
+ERROR: column name mismatch in header line field 1: got "a", expected "c"
+CONTEXT: COPY header_copytest, line 1: "a b c"
+copy header_copytest from stdin with (header match);
+ERROR: column name mismatch in header line field 3: got null value ("\N"), expected "c"
+CONTEXT: COPY header_copytest, line 1: "a b \N"
+copy header_copytest from stdin with (header match);
+ERROR: wrong number of fields in header line: got 2, expected 3
+CONTEXT: COPY header_copytest, line 1: "a b"
+copy header_copytest from stdin with (header match);
+ERROR: wrong number of fields in header line: got 4, expected 3
+CONTEXT: COPY header_copytest, line 1: "a b c d"
+copy header_copytest from stdin with (header match);
+ERROR: column name mismatch in header line field 3: got "d", expected "c"
+CONTEXT: COPY header_copytest, line 1: "a b d"
+SELECT * FROM header_copytest ORDER BY a;
+ a | b | c
+---+---+-----
+ 1 | 2 | foo
+ 3 | 4 | bar
+ 5 | 6 | baz
+(3 rows)
+
+-- Drop an extra column, in the middle of the existing set.
+alter table header_copytest drop column b;
+-- works
+copy header_copytest (c, a) from stdin with (header match);
+copy header_copytest (a, c) from stdin with (header match);
+-- errors
+copy header_copytest from stdin with (header match);
+ERROR: wrong number of fields in header line: got 3, expected 2
+CONTEXT: COPY header_copytest, line 1: "a ........pg.dropped.2........ c"
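+-- ("........pg.dropped.2........" is the internal placeholder name a
+-- dropped column keeps in the tuple descriptor; the input header uses it
+-- deliberately, to verify that dropped columns cannot be matched.)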
+copy header_copytest (a, c) from stdin with (header match);
+ERROR: wrong number of fields in header line: got 3, expected 2
+CONTEXT: COPY header_copytest, line 1: "a c b"
+SELECT * FROM header_copytest ORDER BY a;
+ a | c
+---+-----
+ 1 | foo
+ 3 | bar
+ 5 | baz
+ 7 | foo
+ 8 | foo
+(5 rows)
+
+drop table header_copytest;
+-- test COPY with overlong column defaults
+create temp table oversized_column_default (
+ col1 varchar(5) DEFAULT 'more than 5 chars',
+ col2 varchar(5));
+-- normal COPY should work
+copy oversized_column_default from stdin;
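+-- (This succeeds because COPY supplies a value for every column, so the
+-- oversized default expression is never evaluated.)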
+-- error if the column is excluded
+copy oversized_column_default (col2) from stdin;
+ERROR: value too long for type character varying(5)
+\.
+invalid command \.
+-- error if the DEFAULT option is given
+copy oversized_column_default from stdin (default '');
+ERROR: value too long for type character varying(5)
+\.
+invalid command \.
+drop table oversized_column_default;
+--
+-- Create partitioned table that does not allow bulk insertions, to test bugs
+-- related to the reuse of BulkInsertState across partitions (only done when
+-- not using bulk insert). Switching between partitions often makes it more
+-- likely to encounter these bugs, so we just switch on roughly every insert
+-- by having an even/odd number partition and inserting evenly distributed
+-- data.
+--
+CREATE TABLE parted_si (
+ id int not null,
+ data text not null,
+ -- prevent use of bulk insert by having a volatile function
+ rand float8 not null default random()
+)
+PARTITION BY LIST((id % 2));
+CREATE TABLE parted_si_p_even PARTITION OF parted_si FOR VALUES IN (0);
+CREATE TABLE parted_si_p_odd PARTITION OF parted_si FOR VALUES IN (1);
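+-- (With LIST((id % 2)), consecutive ids land in alternating partitions, so
+-- loading sequential data switches partitions on roughly every row, as the
+-- comment above intends.)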
+-- Test that bulk relation extension handles reusing a single BulkInsertState
+-- across partitions. Without the fix applied, this reliably reproduces
+-- #18130 unless shared_buffers is extremely small (preventing any use of
+-- bulk relation extension). See
+-- https://postgr.es/m/18130-7a86a7356a75209d%40postgresql.org
+-- https://postgr.es/m/257696.1695670946%40sss.pgh.pa.us
+\set filename :abs_srcdir '/data/desc.data'
+COPY parted_si(id, data) FROM :'filename';
+-- An earlier bug (see commit b1ecb9b3fcf) could end up using a buffer from
+-- the wrong partition. This test is *not* guaranteed to trigger that bug, but
+-- does so when shared_buffers is small enough. To test if we encountered the
+-- bug, check that the partition condition isn't violated.
+SELECT tableoid::regclass, id % 2 = 0 is_even, count(*) from parted_si GROUP BY 1, 2 ORDER BY 1;
+ tableoid | is_even | count
+------------------+---------+-------
+ parted_si_p_even | t | 5000
+ parted_si_p_odd | f | 5000
+(2 rows)
+
+DROP TABLE parted_si;