summary refs log tree commit diff stats
path: root/src/test/regress/sql/btree_index.sql
diff options
context:
space:
mode:
Diffstat (limited to 'src/test/regress/sql/btree_index.sql')
-rw-r--r-- src/test/regress/sql/btree_index.sql 174
1 files changed, 174 insertions, 0 deletions
diff --git a/src/test/regress/sql/btree_index.sql b/src/test/regress/sql/btree_index.sql
new file mode 100644
index 0000000..c60312d
--- /dev/null
+++ b/src/test/regress/sql/btree_index.sql
@@ -0,0 +1,174 @@
+--
+-- BTREE_INDEX
+-- test retrieval of min/max keys for each index
+--
+
+-- Each trio of queries probes one btree-indexed heap table at three points:
+-- below the smallest key, at/above the largest key, and at a mid-range key.
+-- The tables (bt_i4_heap, bt_name_heap, bt_txt_heap, bt_f8_heap) are
+-- presumably populated with seqno values spanning 1..9999 by earlier setup
+-- not visible in this file -- confirm against the test schedule.
+
+-- int4-keyed index
+SELECT b.*
+  FROM bt_i4_heap b
+  WHERE b.seqno < 1;
+
+SELECT b.*
+  FROM bt_i4_heap b
+  WHERE b.seqno >= 9999;
+
+SELECT b.*
+  FROM bt_i4_heap b
+  WHERE b.seqno = 4500;
+
+-- name-keyed index (note: name comparison is textual, not numeric)
+SELECT b.*
+  FROM bt_name_heap b
+  WHERE b.seqno < '1'::name;
+
+SELECT b.*
+  FROM bt_name_heap b
+  WHERE b.seqno >= '9999'::name;
+
+SELECT b.*
+  FROM bt_name_heap b
+  WHERE b.seqno = '4500'::name;
+
+-- text-keyed index (textual comparison, as above)
+SELECT b.*
+  FROM bt_txt_heap b
+  WHERE b.seqno < '1'::text;
+
+SELECT b.*
+  FROM bt_txt_heap b
+  WHERE b.seqno >= '9999'::text;
+
+SELECT b.*
+  FROM bt_txt_heap b
+  WHERE b.seqno = '4500'::text;
+
+-- float8-keyed index
+SELECT b.*
+  FROM bt_f8_heap b
+  WHERE b.seqno < '1'::float8;
+
+SELECT b.*
+  FROM bt_f8_heap b
+  WHERE b.seqno >= '9999'::float8;
+
+SELECT b.*
+  FROM bt_f8_heap b
+  WHERE b.seqno = '4500'::float8;
+
+--
+-- Check correct optimization of LIKE (special index operator support)
+-- for both indexscan and bitmapscan cases
+--
+
+-- Force plain index scans: with seqscan and bitmapscan disabled, the planner
+-- must use the index for the prefix LIKE/ILIKE patterns below.
+set enable_seqscan to false;
+set enable_indexscan to true;
+set enable_bitmapscan to false;
+explain (costs off)
+select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1;
+select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1;
+explain (costs off)
+select proname from pg_proc where proname ilike '00%foo' order by 1;
+select proname from pg_proc where proname ilike '00%foo' order by 1;
+-- this pattern is only EXPLAINed, not executed
+explain (costs off)
+select proname from pg_proc where proname ilike 'ri%foo' order by 1;
+
+-- Repeat the same statements with bitmap scans forced instead.
+set enable_indexscan to false;
+set enable_bitmapscan to true;
+explain (costs off)
+select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1;
+select proname from pg_proc where proname like E'RI\\_FKey%del' order by 1;
+explain (costs off)
+select proname from pg_proc where proname ilike '00%foo' order by 1;
+select proname from pg_proc where proname ilike '00%foo' order by 1;
+explain (costs off)
+select proname from pg_proc where proname ilike 'ri%foo' order by 1;
+
+-- Restore default planner settings for subsequent tests.
+reset enable_seqscan;
+reset enable_indexscan;
+reset enable_bitmapscan;
+
+-- Also check LIKE optimization with binary-compatible cases
+
+-- The column is text but the index uses bpchar_ops, so a LIKE on the bare
+-- text column cannot use the index; a LIKE on f1 cast to bpchar can.
+-- Collation "C" makes the prefix-range optimization applicable.
+create temp table btree_bpchar (f1 text collate "C");
+create index on btree_bpchar(f1 bpchar_ops) WITH (deduplicate_items=on);
+insert into btree_bpchar values ('foo'), ('fool'), ('bar'), ('quux');
+-- doesn't match index:
+explain (costs off)
+select * from btree_bpchar where f1 like 'foo';
+select * from btree_bpchar where f1 like 'foo';
+explain (costs off)
+select * from btree_bpchar where f1 like 'foo%';
+select * from btree_bpchar where f1 like 'foo%';
+-- these do match the index:
+explain (costs off)
+select * from btree_bpchar where f1::bpchar like 'foo';
+select * from btree_bpchar where f1::bpchar like 'foo';
+explain (costs off)
+select * from btree_bpchar where f1::bpchar like 'foo%';
+select * from btree_bpchar where f1::bpchar like 'foo%';
+
+-- get test coverage for "single value" deduplication strategy:
+-- many duplicates of one key ('foo') in the deduplicating index
+insert into btree_bpchar select 'foo' from generate_series(1,1500);
+
+--
+-- Perform unique checking, with and without the use of deduplication
+--
+-- autovacuum is disabled so the dead tuples generated below are not cleaned
+-- up behind the test's back.
+CREATE TABLE dedup_unique_test_table (a int) WITH (autovacuum_enabled=false);
+CREATE UNIQUE INDEX dedup_unique ON dedup_unique_test_table (a) WITH (deduplicate_items=on);
+CREATE UNIQUE INDEX plain_unique ON dedup_unique_test_table (a) WITH (deduplicate_items=off);
+-- Generate enough garbage tuples in index to ensure that even the unique index
+-- with deduplication enabled has to check multiple leaf pages during unique
+-- checking (at least with a BLCKSZ of 8192 or less)
+-- Each iteration deletes the single row and reinserts the same key, leaving
+-- one more dead index tuple per round (1350 rounds total).
+DO $$
+BEGIN
+  FOR r IN 1..1350 LOOP
+    DELETE FROM dedup_unique_test_table;
+    INSERT INTO dedup_unique_test_table SELECT 1;
+  END LOOP;
+END$$;
+
+-- Exercise the LP_DEAD-bit-set tuple deletion code with a posting list tuple.
+-- The implementation prefers deleting existing items to merging any duplicate
+-- tuples into a posting list, so we need an explicit test to make sure we get
+-- coverage (note that this test also assumes BLCKSZ is 8192 or less):
+DROP INDEX plain_unique;
+DELETE FROM dedup_unique_test_table WHERE a = 1;
+INSERT INTO dedup_unique_test_table SELECT i FROM generate_series(0,450) i;
+
+--
+-- Test B-tree fast path (cache rightmost leaf page) optimization.
+--
+
+-- First create a tree that's at least three levels deep (i.e. has one level
+-- between the root and leaf levels). The text inserted is long. It won't be
+-- TOAST compressed because we use plain storage in the table. Only a few
+-- index tuples fit on each internal page, allowing us to get a tall tree with
+-- few pages. (A tall tree is required to trigger caching.)
+--
+-- The text column must be the leading column in the index, since suffix
+-- truncation would otherwise truncate tuples on internal pages, leaving us
+-- with a short tree.
+create table btree_tall_tbl(id int4, t text);
+-- plain storage: keep the 250-char values inline and uncompressed
+alter table btree_tall_tbl alter COLUMN t set storage plain;
+-- low fillfactor forces frequent page splits, making the tree taller
+create index btree_tall_idx on btree_tall_tbl (t, id) with (fillfactor = 10);
+insert into btree_tall_tbl select g, repeat('x', 250)
+from generate_series(1, 130) g;
+
+--
+-- Test for multilevel page deletion
+--
+-- Wide four-column bigint key makes index tuples large, so the 80000-row
+-- load builds a multi-level tree.
+CREATE TABLE delete_test_table (a bigint, b bigint, c bigint, d bigint);
+INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,80000) i;
+ALTER TABLE delete_test_table ADD PRIMARY KEY (a,b,c,d);
+-- Delete most entries, and vacuum, deleting internal pages and creating "fast
+-- root"
+DELETE FROM delete_test_table WHERE a < 79990;
+VACUUM delete_test_table;
+
+--
+-- Test B-tree insertion with a metapage update (XLOG_BTREE_INSERT_META
+-- WAL record type). This happens when a "fast root" page is split. This
+-- also creates coverage for nbtree FSM page recycling.
+--
+-- The vacuum above should've turned the leaf page into a fast root. We just
+-- need to insert some rows to cause the fast root page to split.
+-- Re-inserting low keys (1..1000, duplicating a,b,c,d=?) -- NOTE(review):
+-- rows 991..999 overlap surviving keys only if any remain; presumably the
+-- prior DELETE left a >= 79990, so these inserts are all-new -- verify.
+INSERT INTO delete_test_table SELECT i, 1, 2, 3 FROM generate_series(1,1000) i;
+
+-- Test unsupported btree opclass parameters
+-- int4_ops takes no parameters, so this statement is expected to fail with
+-- an error (negative test).
+create index on btree_tall_tbl (id int4_ops(foo=1));